From ea2df2601e81aa9e490459ad9c65b03de2be61d0 Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Thu, 13 Jan 2022 05:21:42 -0500
Subject: [PATCH 0001/2031] std/os/uefi: Use `usingnamespace` to re-export
symbols
`uefi/protocols.zig` and `uefi/tables.zig` simply re-exported all of the
public symbols from their submodules, which is essentially what
`usingnamespace` does for imports.
---
lib/std/os/uefi/protocols.zig | 124 ++++++++++------------------------
lib/std/os/uefi/tables.zig | 19 ++----
2 files changed, 39 insertions(+), 104 deletions(-)
diff --git a/lib/std/os/uefi/protocols.zig b/lib/std/os/uefi/protocols.zig
index 4192b4a545..0c027f046b 100644
--- a/lib/std/os/uefi/protocols.zig
+++ b/lib/std/os/uefi/protocols.zig
@@ -1,100 +1,44 @@
-pub const LoadedImageProtocol = @import("protocols/loaded_image_protocol.zig").LoadedImageProtocol;
-pub const loaded_image_device_path_protocol_guid = @import("protocols/loaded_image_protocol.zig").loaded_image_device_path_protocol_guid;
+// Misc
+pub usingnamespace @import("protocols/loaded_image_protocol.zig");
+pub usingnamespace @import("protocols/device_path_protocol.zig");
+pub usingnamespace @import("protocols/rng_protocol.zig");
+pub usingnamespace @import("protocols/shell_parameters_protocol.zig");
-pub const AcpiDevicePath = @import("protocols/device_path_protocol.zig").AcpiDevicePath;
-pub const BiosBootSpecificationDevicePath = @import("protocols/device_path_protocol.zig").BiosBootSpecificationDevicePath;
-pub const DevicePath = @import("protocols/device_path_protocol.zig").DevicePath;
-pub const DevicePathProtocol = @import("protocols/device_path_protocol.zig").DevicePathProtocol;
-pub const DevicePathType = @import("protocols/device_path_protocol.zig").DevicePathType;
-pub const EndDevicePath = @import("protocols/device_path_protocol.zig").EndDevicePath;
-pub const HardwareDevicePath = @import("protocols/device_path_protocol.zig").HardwareDevicePath;
-pub const MediaDevicePath = @import("protocols/device_path_protocol.zig").MediaDevicePath;
-pub const MessagingDevicePath = @import("protocols/device_path_protocol.zig").MessagingDevicePath;
+// Files
+pub usingnamespace @import("protocols/simple_file_system_protocol.zig");
+pub usingnamespace @import("protocols/file_protocol.zig");
-pub const SimpleFileSystemProtocol = @import("protocols/simple_file_system_protocol.zig").SimpleFileSystemProtocol;
-pub const FileProtocol = @import("protocols/file_protocol.zig").FileProtocol;
-pub const FileInfo = @import("protocols/file_protocol.zig").FileInfo;
-pub const FileSystemInfo = @import("protocols/file_protocol.zig").FileSystemInfo;
+// Text
+pub usingnamespace @import("protocols/simple_text_input_protocol.zig");
+pub usingnamespace @import("protocols/simple_text_input_ex_protocol.zig");
+pub usingnamespace @import("protocols/simple_text_output_protocol.zig");
-pub const InputKey = @import("protocols/simple_text_input_ex_protocol.zig").InputKey;
-pub const KeyData = @import("protocols/simple_text_input_ex_protocol.zig").KeyData;
-pub const KeyState = @import("protocols/simple_text_input_ex_protocol.zig").KeyState;
-pub const SimpleTextInputProtocol = @import("protocols/simple_text_input_protocol.zig").SimpleTextInputProtocol;
-pub const SimpleTextInputExProtocol = @import("protocols/simple_text_input_ex_protocol.zig").SimpleTextInputExProtocol;
+// Pointer
+pub usingnamespace @import("protocols/simple_pointer_protocol.zig");
+pub usingnamespace @import("protocols/absolute_pointer_protocol.zig");
-pub const SimpleTextOutputMode = @import("protocols/simple_text_output_protocol.zig").SimpleTextOutputMode;
-pub const SimpleTextOutputProtocol = @import("protocols/simple_text_output_protocol.zig").SimpleTextOutputProtocol;
+pub usingnamespace @import("protocols/graphics_output_protocol.zig");
-pub const SimplePointerMode = @import("protocols/simple_pointer_protocol.zig").SimplePointerMode;
-pub const SimplePointerProtocol = @import("protocols/simple_pointer_protocol.zig").SimplePointerProtocol;
-pub const SimplePointerState = @import("protocols/simple_pointer_protocol.zig").SimplePointerState;
+// edid
+pub usingnamespace @import("protocols/edid_discovered_protocol.zig");
+pub usingnamespace @import("protocols/edid_active_protocol.zig");
+pub usingnamespace @import("protocols/edid_override_protocol.zig");
-pub const AbsolutePointerMode = @import("protocols/absolute_pointer_protocol.zig").AbsolutePointerMode;
-pub const AbsolutePointerProtocol = @import("protocols/absolute_pointer_protocol.zig").AbsolutePointerProtocol;
-pub const AbsolutePointerState = @import("protocols/absolute_pointer_protocol.zig").AbsolutePointerState;
+// Network
+pub usingnamespace @import("protocols/simple_network_protocol.zig");
+pub usingnamespace @import("protocols/managed_network_service_binding_protocol.zig");
+pub usingnamespace @import("protocols/managed_network_protocol.zig");
-pub const GraphicsOutputBltPixel = @import("protocols/graphics_output_protocol.zig").GraphicsOutputBltPixel;
-pub const GraphicsOutputBltOperation = @import("protocols/graphics_output_protocol.zig").GraphicsOutputBltOperation;
-pub const GraphicsOutputModeInformation = @import("protocols/graphics_output_protocol.zig").GraphicsOutputModeInformation;
-pub const GraphicsOutputProtocol = @import("protocols/graphics_output_protocol.zig").GraphicsOutputProtocol;
-pub const GraphicsOutputProtocolMode = @import("protocols/graphics_output_protocol.zig").GraphicsOutputProtocolMode;
-pub const GraphicsPixelFormat = @import("protocols/graphics_output_protocol.zig").GraphicsPixelFormat;
-pub const PixelBitmask = @import("protocols/graphics_output_protocol.zig").PixelBitmask;
+// ip6
+pub usingnamespace @import("protocols/ip6_service_binding_protocol.zig");
+pub usingnamespace @import("protocols/ip6_protocol.zig");
+pub usingnamespace @import("protocols/ip6_config_protocol.zig");
-pub const EdidDiscoveredProtocol = @import("protocols/edid_discovered_protocol.zig").EdidDiscoveredProtocol;
-
-pub const EdidActiveProtocol = @import("protocols/edid_active_protocol.zig").EdidActiveProtocol;
-
-pub const EdidOverrideProtocol = @import("protocols/edid_override_protocol.zig").EdidOverrideProtocol;
-pub const EdidOverrideProtocolAttributes = @import("protocols/edid_override_protocol.zig").EdidOverrideProtocolAttributes;
-
-pub const SimpleNetworkProtocol = @import("protocols/simple_network_protocol.zig").SimpleNetworkProtocol;
-pub const MacAddress = @import("protocols/simple_network_protocol.zig").MacAddress;
-pub const SimpleNetworkMode = @import("protocols/simple_network_protocol.zig").SimpleNetworkMode;
-pub const SimpleNetworkReceiveFilter = @import("protocols/simple_network_protocol.zig").SimpleNetworkReceiveFilter;
-pub const SimpleNetworkState = @import("protocols/simple_network_protocol.zig").SimpleNetworkState;
-pub const NetworkStatistics = @import("protocols/simple_network_protocol.zig").NetworkStatistics;
-pub const SimpleNetworkInterruptStatus = @import("protocols/simple_network_protocol.zig").SimpleNetworkInterruptStatus;
-
-pub const ManagedNetworkServiceBindingProtocol = @import("protocols/managed_network_service_binding_protocol.zig").ManagedNetworkServiceBindingProtocol;
-pub const ManagedNetworkProtocol = @import("protocols/managed_network_protocol.zig").ManagedNetworkProtocol;
-pub const ManagedNetworkConfigData = @import("protocols/managed_network_protocol.zig").ManagedNetworkConfigData;
-pub const ManagedNetworkCompletionToken = @import("protocols/managed_network_protocol.zig").ManagedNetworkCompletionToken;
-pub const ManagedNetworkReceiveData = @import("protocols/managed_network_protocol.zig").ManagedNetworkReceiveData;
-pub const ManagedNetworkTransmitData = @import("protocols/managed_network_protocol.zig").ManagedNetworkTransmitData;
-pub const ManagedNetworkFragmentData = @import("protocols/managed_network_protocol.zig").ManagedNetworkFragmentData;
-
-pub const Ip6ServiceBindingProtocol = @import("protocols/ip6_service_binding_protocol.zig").Ip6ServiceBindingProtocol;
-pub const Ip6Protocol = @import("protocols/ip6_protocol.zig").Ip6Protocol;
-pub const Ip6ModeData = @import("protocols/ip6_protocol.zig").Ip6ModeData;
-pub const Ip6ConfigData = @import("protocols/ip6_protocol.zig").Ip6ConfigData;
-pub const Ip6Address = @import("protocols/ip6_protocol.zig").Ip6Address;
-pub const Ip6AddressInfo = @import("protocols/ip6_protocol.zig").Ip6AddressInfo;
-pub const Ip6RouteTable = @import("protocols/ip6_protocol.zig").Ip6RouteTable;
-pub const Ip6NeighborState = @import("protocols/ip6_protocol.zig").Ip6NeighborState;
-pub const Ip6NeighborCache = @import("protocols/ip6_protocol.zig").Ip6NeighborCache;
-pub const Ip6IcmpType = @import("protocols/ip6_protocol.zig").Ip6IcmpType;
-pub const Ip6CompletionToken = @import("protocols/ip6_protocol.zig").Ip6CompletionToken;
-
-pub const Ip6ConfigProtocol = @import("protocols/ip6_config_protocol.zig").Ip6ConfigProtocol;
-pub const Ip6ConfigDataType = @import("protocols/ip6_config_protocol.zig").Ip6ConfigDataType;
-
-pub const Udp6ServiceBindingProtocol = @import("protocols/udp6_service_binding_protocol.zig").Udp6ServiceBindingProtocol;
-pub const Udp6Protocol = @import("protocols/udp6_protocol.zig").Udp6Protocol;
-pub const Udp6ConfigData = @import("protocols/udp6_protocol.zig").Udp6ConfigData;
-pub const Udp6CompletionToken = @import("protocols/udp6_protocol.zig").Udp6CompletionToken;
-pub const Udp6ReceiveData = @import("protocols/udp6_protocol.zig").Udp6ReceiveData;
-pub const Udp6TransmitData = @import("protocols/udp6_protocol.zig").Udp6TransmitData;
-pub const Udp6SessionData = @import("protocols/udp6_protocol.zig").Udp6SessionData;
-pub const Udp6FragmentData = @import("protocols/udp6_protocol.zig").Udp6FragmentData;
+// udp6
+pub usingnamespace @import("protocols/udp6_service_binding_protocol.zig");
+pub usingnamespace @import("protocols/udp6_protocol.zig");
+// hii
pub const hii = @import("protocols/hii.zig");
-pub const HIIDatabaseProtocol = @import("protocols/hii_database_protocol.zig").HIIDatabaseProtocol;
-pub const HIIPopupProtocol = @import("protocols/hii_popup_protocol.zig").HIIPopupProtocol;
-pub const HIIPopupStyle = @import("protocols/hii_popup_protocol.zig").HIIPopupStyle;
-pub const HIIPopupType = @import("protocols/hii_popup_protocol.zig").HIIPopupType;
-pub const HIIPopupSelection = @import("protocols/hii_popup_protocol.zig").HIIPopupSelection;
-
-pub const RNGProtocol = @import("protocols/rng_protocol.zig").RNGProtocol;
-
-pub const ShellParametersProtocol = @import("protocols/shell_parameters_protocol.zig").ShellParametersProtocol;
+pub usingnamespace @import("protocols/hii_database_protocol.zig");
+pub usingnamespace @import("protocols/hii_popup_protocol.zig");
diff --git a/lib/std/os/uefi/tables.zig b/lib/std/os/uefi/tables.zig
index 0011c80a9c..a65e5789c7 100644
--- a/lib/std/os/uefi/tables.zig
+++ b/lib/std/os/uefi/tables.zig
@@ -1,14 +1,5 @@
-pub const AllocateType = @import("tables/boot_services.zig").AllocateType;
-pub const BootServices = @import("tables/boot_services.zig").BootServices;
-pub const ConfigurationTable = @import("tables/configuration_table.zig").ConfigurationTable;
-pub const global_variable align(8) = @import("tables/runtime_services.zig").global_variable;
-pub const LocateSearchType = @import("tables/boot_services.zig").LocateSearchType;
-pub const MemoryDescriptor = @import("tables/boot_services.zig").MemoryDescriptor;
-pub const MemoryType = @import("tables/boot_services.zig").MemoryType;
-pub const OpenProtocolAttributes = @import("tables/boot_services.zig").OpenProtocolAttributes;
-pub const ProtocolInformationEntry = @import("tables/boot_services.zig").ProtocolInformationEntry;
-pub const ResetType = @import("tables/runtime_services.zig").ResetType;
-pub const RuntimeServices = @import("tables/runtime_services.zig").RuntimeServices;
-pub const SystemTable = @import("tables/system_table.zig").SystemTable;
-pub const TableHeader = @import("tables/table_header.zig").TableHeader;
-pub const TimerDelay = @import("tables/boot_services.zig").TimerDelay;
+pub usingnamespace @import("tables/boot_services.zig");
+pub usingnamespace @import("tables/runtime_services.zig");
+pub usingnamespace @import("tables/configuration_table.zig");
+pub usingnamespace @import("tables/system_table.zig");
+pub usingnamespace @import("tables/table_header.zig");
From 649b872450689511ac3c5526d063c89fff7d898a Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Thu, 13 Jan 2022 05:23:39 -0500
Subject: [PATCH 0002/2031] std/builtin: improve panic handler for uefi
Writes the panic message to stderr, and also passes it to
boot_services.exit when boot_services is available.
---
lib/std/builtin.zig | 47 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 7fcd9a369b..b69b31ae17 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -769,7 +769,52 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn
std.os.abort();
},
.uefi => {
- // TODO look into using the debug info and logging helpful messages
+ const uefi = std.os.uefi;
+
+ const ExitData = struct {
+ pub fn create_exit_data(exit_msg: []const u8, exit_size: *usize) ![*:0]u16 {
+ // Need boot services for pool allocation
+ if (uefi.system_table.boot_services == null) {
+ return error.BootServicesUnavailable;
+ }
+
+ // ExitData buffer must be allocated using boot_services.allocatePool
+ var utf16: []u16 = try uefi.raw_pool_allocator.alloc(u16, 256);
+ errdefer uefi.raw_pool_allocator.free(utf16);
+
+ if (exit_msg.len > 255) {
+ return error.MessageTooLong;
+ }
+
+ var fmt: [256]u8 = undefined;
+ var slice = try std.fmt.bufPrint(&fmt, "\r\nerr: {s}\r\n", .{exit_msg});
+
+ var len = try std.unicode.utf8ToUtf16Le(utf16, slice);
+
+ utf16[len] = 0;
+
+ exit_size.* = 256;
+
+ return @ptrCast([*:0]u16, utf16.ptr);
+ }
+ };
+
+ var exit_size: usize = 0;
+ var exit_data = ExitData.create_exit_data(msg, &exit_size) catch null;
+
+ if (exit_data) |data| {
+ if (uefi.system_table.std_err) |out| {
+ _ = out.setAttribute(uefi.protocols.SimpleTextOutputProtocol.red);
+ _ = out.outputString(data);
+ _ = out.setAttribute(uefi.protocols.SimpleTextOutputProtocol.white);
+ }
+ }
+
+ if (uefi.system_table.boot_services) |bs| {
+ _ = bs.exit(uefi.handle, .Aborted, exit_size, exit_data);
+ }
+
+ // Didn't have boot_services, just fallback to whatever.
std.os.abort();
},
else => {
From fe28cb8261eeb7e3094d38f2c5fb2012fbf205d6 Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Thu, 13 Jan 2022 05:54:24 -0500
Subject: [PATCH 0003/2031] std/os/uefi: Fill out remaining function signatures
and docs on boot_services
---
lib/std/os/uefi/tables/boot_services.zig | 44 ++++++++++++++++++------
1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig
index 75daf6feb2..a41f2dcdb9 100644
--- a/lib/std/os/uefi/tables/boot_services.zig
+++ b/lib/std/os/uefi/tables/boot_services.zig
@@ -59,23 +59,34 @@ pub const BootServices = extern struct {
/// Checks whether an event is in the signaled state.
checkEvent: fn (Event) callconv(.C) Status,
- installProtocolInterface: Status, // TODO
- reinstallProtocolInterface: Status, // TODO
- uninstallProtocolInterface: Status, // TODO
+ /// Installs a protocol interface on a device handle. If the handle does not exist, it is created
+ /// and added to the list of handles in the system. installMultipleProtocolInterfaces()
+ /// performs more error checking than installProtocolInterface(), so its use is recommended over this.
+ installProtocolInterface: fn (Handle, *align(8) const Guid, EfiInterfaceType, *anyopaque) callconv(.C) Status,
+
+ /// Reinstalls a protocol interface on a device handle
+ reinstallProtocolInterface: fn (Handle, *align(8) const Guid, *anyopaque, *anyopaque) callconv(.C) Status,
+
+ /// Removes a protocol interface from a device handle. Usage of
+ /// uninstallMultipleProtocolInterfaces is recommended over this.
+ uninstallProtocolInterface: fn (Handle, *align(8) const Guid, *anyopaque) callconv(.C) Status,
/// Queries a handle to determine if it supports a specified protocol.
handleProtocol: fn (Handle, *align(8) const Guid, *?*anyopaque) callconv(.C) Status,
reserved: *anyopaque,
- registerProtocolNotify: Status, // TODO
+ /// Creates an event that is to be signaled whenever an interface is installed for a specified protocol.
+ registerProtocolNotify: fn (*align(8) const Guid, Event, **anyopaque) callconv(.C) Status,
/// Returns an array of handles that support a specified protocol.
locateHandle: fn (LocateSearchType, ?*align(8) const Guid, ?*const anyopaque, *usize, [*]Handle) callconv(.C) Status,
/// Locates the handle to a device on the device path that supports the specified protocol
locateDevicePath: fn (*align(8) const Guid, **const DevicePathProtocol, *?Handle) callconv(.C) Status,
- installConfigurationTable: Status, // TODO
+
+ /// Adds, updates, or removes a configuration table entry from the EFI System Table.
+ installConfigurationTable: fn (*align(8) const Guid, ?*anyopaque) callconv(.C) Status,
/// Loads an EFI image into memory.
loadImage: fn (bool, Handle, ?*const DevicePathProtocol, ?[*]const u8, usize, *?Handle) callconv(.C) Status,
@@ -101,8 +112,11 @@ pub const BootServices = extern struct {
/// Sets the system's watchdog timer.
setWatchdogTimer: fn (usize, u64, usize, ?[*]const u16) callconv(.C) Status,
- connectController: Status, // TODO
- disconnectController: Status, // TODO
+ /// Connects one or more drives to a controller.
+ connectController: fn (Handle, ?Handle, ?*DevicePathProtocol, bool) callconv(.C) Status,
+
+ // Disconnects one or more drivers from a controller
+ disconnectController: fn (Handle, ?Handle, ?Handle) callconv(.C) Status,
/// Queries a handle to determine if it supports a specified protocol.
openProtocol: fn (Handle, *align(8) const Guid, *?*anyopaque, ?Handle, ?Handle, OpenProtocolAttributes) callconv(.C) Status,
@@ -122,8 +136,11 @@ pub const BootServices = extern struct {
/// Returns the first protocol instance that matches the given protocol.
locateProtocol: fn (*align(8) const Guid, ?*const anyopaque, *?*anyopaque) callconv(.C) Status,
- installMultipleProtocolInterfaces: Status, // TODO
- uninstallMultipleProtocolInterfaces: Status, // TODO
+ /// Installs one or more protocol interfaces into the boot services environment
+ installMultipleProtocolInterfaces: fn (*Handle, ...) callconv(.C) Status,
+
+ /// Removes one or more protocol interfaces into the boot services environment
+ uninstallMultipleProtocolInterfaces: fn (*Handle, ...) callconv(.C) Status,
/// Computes and returns a 32-bit CRC for a data buffer.
calculateCrc32: fn ([*]const u8, usize, *u32) callconv(.C) Status,
@@ -134,7 +151,8 @@ pub const BootServices = extern struct {
/// Fills a buffer with a specified value
setMem: fn ([*]u8, usize, u8) callconv(.C) void,
- createEventEx: Status, // TODO
+ /// Creates an event in a group.
+ createEventEx: fn (u32, usize, EfiEventNotify, *const anyopaque, *align(8) const Guid, *Event) callconv(.C) Status,
pub const signature: u64 = 0x56524553544f4f42;
@@ -151,6 +169,8 @@ pub const BootServices = extern struct {
pub const tpl_high_level: usize = 31;
};
+pub const EfiEventNotify = fn (event: Event, ctx: *anyopaque) callconv(.C) void;
+
pub const TimerDelay = enum(u32) {
TimerCancel,
TimerPeriodic,
@@ -231,6 +251,10 @@ pub const ProtocolInformationEntry = extern struct {
open_count: u32,
};
+pub const EfiInterfaceType = enum(u32) {
+ EfiNativeInterface,
+};
+
pub const AllocateType = enum(u32) {
AllocateAnyPages,
AllocateMaxAddress,
From 88687645b2a5c34304c36ded31d5164784aa207a Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Thu, 13 Jan 2022 11:19:22 -0500
Subject: [PATCH 0004/2031] std/os/uefi: Fill out remaining runtime services
and add parameter names
---
lib/std/os/uefi/tables/runtime_services.zig | 60 ++++++++++++++++-----
1 file changed, 46 insertions(+), 14 deletions(-)
diff --git a/lib/std/os/uefi/tables/runtime_services.zig b/lib/std/os/uefi/tables/runtime_services.zig
index 1250894180..f238951553 100644
--- a/lib/std/os/uefi/tables/runtime_services.zig
+++ b/lib/std/os/uefi/tables/runtime_services.zig
@@ -18,39 +18,71 @@ pub const RuntimeServices = extern struct {
hdr: TableHeader,
/// Returns the current time and date information, and the time-keeping capabilities of the hardware platform.
- getTime: fn (*uefi.Time, ?*TimeCapabilities) callconv(.C) Status,
+ getTime: fn (time: *uefi.Time, capabilities: ?*TimeCapabilities) callconv(.C) Status,
- setTime: Status, // TODO
- getWakeupTime: Status, // TODO
- setWakeupTime: Status, // TODO
+ /// Sets the current local time and date information
+ setTime: fn (time: *uefi.Time) callconv(.C) Status,
+
+ /// Returns the current wakeup alarm clock setting
+ getWakeupTime: fn (enabled: *bool, pending: *bool, time: *uefi.Time) callconv(.C) Status,
+
+ /// Sets the system wakeup alarm clock time
+ setWakeupTime: fn (enable: *bool, time: ?*uefi.Time) callconv(.C) Status,
/// Changes the runtime addressing mode of EFI firmware from physical to virtual.
- setVirtualAddressMap: fn (usize, usize, u32, [*]MemoryDescriptor) callconv(.C) Status,
+ setVirtualAddressMap: fn (mmap_size: usize, descriptor_size: usize, descriptor_version: u32, virtual_map: [*]MemoryDescriptor) callconv(.C) Status,
/// Determines the new virtual address that is to be used on subsequent memory accesses.
- convertPointer: fn (usize, **anyopaque) callconv(.C) Status,
+ convertPointer: fn (debug_disposition: usize, address: **anyopaque) callconv(.C) Status,
/// Returns the value of a variable.
- getVariable: fn ([*:0]const u16, *align(8) const Guid, ?*u32, *usize, ?*anyopaque) callconv(.C) Status,
+ getVariable: fn (var_name: [*:0]const u16, vendor_guid: *align(8) const Guid, attributes: ?*u32, data_size: *usize, data: ?*anyopaque) callconv(.C) Status,
/// Enumerates the current variable names.
- getNextVariableName: fn (*usize, [*:0]u16, *align(8) Guid) callconv(.C) Status,
+ getNextVariableName: fn (var_name_size: *usize, var_name: [*:0]u16, vendor_guid: *align(8) Guid) callconv(.C) Status,
/// Sets the value of a variable.
- setVariable: fn ([*:0]const u16, *align(8) const Guid, u32, usize, *anyopaque) callconv(.C) Status,
+ setVariable: fn (var_name: [*:0]const u16, vendor_guid: *align(8) const Guid, attributes: u32, data_size: usize, data: *anyopaque) callconv(.C) Status,
- getNextHighMonotonicCount: Status, // TODO
+ /// Return the next high 32 bits of the platform's monotonic counter
+ getNextHighMonotonicCount: fn (high_count: *u32) callconv(.C) Status,
/// Resets the entire platform.
- resetSystem: fn (ResetType, Status, usize, ?*const anyopaque) callconv(.C) noreturn,
+ resetSystem: fn (reset_type: ResetType, reset_status: Status, data_size: usize, reset_data: ?*const anyopaque) callconv(.C) noreturn,
- updateCapsule: Status, // TODO
- queryCapsuleCapabilities: Status, // TODO
- queryVariableInfo: Status, // TODO
+ /// Passes capsules to the firmware with both virtual and physical mapping.
+ /// Depending on the intended consumption, the firmware may process the capsule immediately.
+ /// If the payload should persist across a system reset, the reset value returned from
+ /// `queryCapsuleCapabilities` must be passed into resetSystem and will cause the capsule
+ /// to be processed by the firmware as part of the reset process.
+ updateCapsule: fn (capsule_header_array: **CapsuleHeader, capsule_count: usize, scatter_gather_list: EfiPhysicalAddress) callconv(.C) Status,
+
+ /// Returns if the capsule can be supported via `updateCapsule`
+ queryCapsuleCapabilities: fn (capsule_header_array: **CapsuleHeader, capsule_count: usize, maximum_capsule_size: *usize, resetType: ResetType) callconv(.C) Status,
+
+ /// Returns information about the EFI variables
+ queryVariableInfo: fn (attributes: *u32, maximum_variable_storage_size: *u64, remaining_variable_storage_size: *u64, maximum_variable_size: *u64) callconv(.C) Status,
pub const signature: u64 = 0x56524553544e5552;
};
+const EfiPhysicalAddress = u64;
+
+pub const CapsuleHeader = extern struct {
+ capsuleGuid: Guid align(8),
+ headerSize: u32,
+ flags: u32,
+ capsuleImageSize: u32,
+};
+
+pub const UefiCapsuleBlockDescriptor = extern struct {
+ length: u64,
+ address: union {
+ dataBlock: EfiPhysicalAddress,
+ continuationPointer: EfiPhysicalAddress,
+ },
+};
+
pub const ResetType = enum(u32) {
ResetCold,
ResetWarm,
From 322757c4e13acbe866e6b3431cd3b6b8ce68efb7 Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Thu, 13 Jan 2022 11:48:25 -0500
Subject: [PATCH 0005/2031] std/os/uefi: Add parameter names to boot_services
---
lib/std/os/uefi/tables/boot_services.zig | 86 ++++++++++++------------
1 file changed, 43 insertions(+), 43 deletions(-)
diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig
index a41f2dcdb9..7da007b05d 100644
--- a/lib/std/os/uefi/tables/boot_services.zig
+++ b/lib/std/os/uefi/tables/boot_services.zig
@@ -21,138 +21,138 @@ pub const BootServices = extern struct {
hdr: TableHeader,
/// Raises a task's priority level and returns its previous level.
- raiseTpl: fn (usize) callconv(.C) usize,
+ raiseTpl: fn (new_tpl: usize) callconv(.C) usize,
/// Restores a task's priority level to its previous value.
- restoreTpl: fn (usize) callconv(.C) void,
+ restoreTpl: fn (old_tpl: usize) callconv(.C) void,
/// Allocates memory pages from the system.
- allocatePages: fn (AllocateType, MemoryType, usize, *[*]align(4096) u8) callconv(.C) Status,
+ allocatePages: fn (alloc_type: AllocateType, mem_type: MemoryType, pages: usize, memory: *[*]align(4096) u8) callconv(.C) Status,
/// Frees memory pages.
- freePages: fn ([*]align(4096) u8, usize) callconv(.C) Status,
+ freePages: fn (memory: [*]align(4096) u8, pages: usize) callconv(.C) Status,
/// Returns the current memory map.
- getMemoryMap: fn (*usize, [*]MemoryDescriptor, *usize, *usize, *u32) callconv(.C) Status,
+ getMemoryMap: fn (mmap_size: *usize, mmap: [*]MemoryDescriptor, mapKey: *usize, descriptor_size: *usize, descriptor_version: *u32) callconv(.C) Status,
/// Allocates pool memory.
- allocatePool: fn (MemoryType, usize, *[*]align(8) u8) callconv(.C) Status,
+ allocatePool: fn (pool_type: MemoryType, size: usize, buffer: *[*]align(8) u8) callconv(.C) Status,
/// Returns pool memory to the system.
- freePool: fn ([*]align(8) u8) callconv(.C) Status,
+ freePool: fn (buffer: [*]align(8) u8) callconv(.C) Status,
/// Creates an event.
- createEvent: fn (u32, usize, ?fn (Event, ?*anyopaque) callconv(.C) void, ?*const anyopaque, *Event) callconv(.C) Status,
+ createEvent: fn (type: u32, notify_tpl: usize, notify_func: ?fn (Event, ?*anyopaque) callconv(.C) void, notifyCtx: ?*const anyopaque, event: *Event) callconv(.C) Status,
/// Sets the type of timer and the trigger time for a timer event.
- setTimer: fn (Event, TimerDelay, u64) callconv(.C) Status,
+ setTimer: fn (event: Event, type: TimerDelay, triggerTime: u64) callconv(.C) Status,
/// Stops execution until an event is signaled.
- waitForEvent: fn (usize, [*]const Event, *usize) callconv(.C) Status,
+ waitForEvent: fn (event_len: usize, events: [*]const Event, index: *usize) callconv(.C) Status,
/// Signals an event.
- signalEvent: fn (Event) callconv(.C) Status,
+ signalEvent: fn (event: Event) callconv(.C) Status,
/// Closes an event.
- closeEvent: fn (Event) callconv(.C) Status,
+ closeEvent: fn (event: Event) callconv(.C) Status,
/// Checks whether an event is in the signaled state.
- checkEvent: fn (Event) callconv(.C) Status,
+ checkEvent: fn (event: Event) callconv(.C) Status,
/// Installs a protocol interface on a device handle. If the handle does not exist, it is created
/// and added to the list of handles in the system. installMultipleProtocolInterfaces()
/// performs more error checking than installProtocolInterface(), so its use is recommended over this.
- installProtocolInterface: fn (Handle, *align(8) const Guid, EfiInterfaceType, *anyopaque) callconv(.C) Status,
+ installProtocolInterface: fn (handle: Handle, protocol: *align(8) const Guid, interface_type: EfiInterfaceType, interface: *anyopaque) callconv(.C) Status,
/// Reinstalls a protocol interface on a device handle
- reinstallProtocolInterface: fn (Handle, *align(8) const Guid, *anyopaque, *anyopaque) callconv(.C) Status,
+ reinstallProtocolInterface: fn (handle: Handle, protocol: *align(8) const Guid, old_interface: *anyopaque, new_interface: *anyopaque) callconv(.C) Status,
/// Removes a protocol interface from a device handle. Usage of
/// uninstallMultipleProtocolInterfaces is recommended over this.
- uninstallProtocolInterface: fn (Handle, *align(8) const Guid, *anyopaque) callconv(.C) Status,
+ uninstallProtocolInterface: fn (handle: Handle, protocol: *align(8) const Guid, interface: *anyopaque) callconv(.C) Status,
/// Queries a handle to determine if it supports a specified protocol.
- handleProtocol: fn (Handle, *align(8) const Guid, *?*anyopaque) callconv(.C) Status,
+ handleProtocol: fn (handle: Handle, protocol: *align(8) const Guid, interface: *?*anyopaque) callconv(.C) Status,
reserved: *anyopaque,
/// Creates an event that is to be signaled whenever an interface is installed for a specified protocol.
- registerProtocolNotify: fn (*align(8) const Guid, Event, **anyopaque) callconv(.C) Status,
+ registerProtocolNotify: fn (protocol: *align(8) const Guid, event: Event, registration: **anyopaque) callconv(.C) Status,
/// Returns an array of handles that support a specified protocol.
- locateHandle: fn (LocateSearchType, ?*align(8) const Guid, ?*const anyopaque, *usize, [*]Handle) callconv(.C) Status,
+ locateHandle: fn (search_type: LocateSearchType, protocol: ?*align(8) const Guid, search_key: ?*const anyopaque, bufferSize: *usize, buffer: [*]Handle) callconv(.C) Status,
/// Locates the handle to a device on the device path that supports the specified protocol
- locateDevicePath: fn (*align(8) const Guid, **const DevicePathProtocol, *?Handle) callconv(.C) Status,
+ locateDevicePath: fn (protocols: *align(8) const Guid, device_path: **const DevicePathProtocol, device: *?Handle) callconv(.C) Status,
/// Adds, updates, or removes a configuration table entry from the EFI System Table.
- installConfigurationTable: fn (*align(8) const Guid, ?*anyopaque) callconv(.C) Status,
+ installConfigurationTable: fn (guid: *align(8) const Guid, table: ?*anyopaque) callconv(.C) Status,
/// Loads an EFI image into memory.
- loadImage: fn (bool, Handle, ?*const DevicePathProtocol, ?[*]const u8, usize, *?Handle) callconv(.C) Status,
+ loadImage: fn (boot_policy: bool, parent_image_handle: Handle, device_path: ?*const DevicePathProtocol, source_buffer: ?[*]const u8, source_size: usize, imageHandle: *?Handle) callconv(.C) Status,
/// Transfers control to a loaded image's entry point.
- startImage: fn (Handle, ?*usize, ?*[*]u16) callconv(.C) Status,
+ startImage: fn (image_handle: Handle, exit_data_size: ?*usize, exit_data: ?*[*]u16) callconv(.C) Status,
/// Terminates a loaded EFI image and returns control to boot services.
- exit: fn (Handle, Status, usize, ?*const anyopaque) callconv(.C) Status,
+ exit: fn (image_handle: Handle, exit_status: Status, exit_data_size: usize, exit_data: ?*const anyopaque) callconv(.C) Status,
/// Unloads an image.
- unloadImage: fn (Handle) callconv(.C) Status,
+ unloadImage: fn (image_handle: Handle) callconv(.C) Status,
/// Terminates all boot services.
- exitBootServices: fn (Handle, usize) callconv(.C) Status,
+ exitBootServices: fn (image_handle: Handle, map_key: usize) callconv(.C) Status,
/// Returns a monotonically increasing count for the platform.
- getNextMonotonicCount: fn (*u64) callconv(.C) Status,
+ getNextMonotonicCount: fn (count: *u64) callconv(.C) Status,
/// Induces a fine-grained stall.
- stall: fn (usize) callconv(.C) Status,
+ stall: fn (microseconds: usize) callconv(.C) Status,
/// Sets the system's watchdog timer.
- setWatchdogTimer: fn (usize, u64, usize, ?[*]const u16) callconv(.C) Status,
+ setWatchdogTimer: fn (timeout: usize, watchdogCode: u64, data_size: usize, watchdog_data: ?[*]const u16) callconv(.C) Status,
/// Connects one or more drives to a controller.
- connectController: fn (Handle, ?Handle, ?*DevicePathProtocol, bool) callconv(.C) Status,
+ connectController: fn (controller_handle: Handle, driver_image_handle: ?Handle, remaining_device_path: ?*DevicePathProtocol, recursive: bool) callconv(.C) Status,
// Disconnects one or more drivers from a controller
- disconnectController: fn (Handle, ?Handle, ?Handle) callconv(.C) Status,
+ disconnectController: fn (controller_handle: Handle, driver_image_handle: ?Handle, child_handle: ?Handle) callconv(.C) Status,
/// Queries a handle to determine if it supports a specified protocol.
- openProtocol: fn (Handle, *align(8) const Guid, *?*anyopaque, ?Handle, ?Handle, OpenProtocolAttributes) callconv(.C) Status,
+ openProtocol: fn (handle: Handle, protocol: *align(8) const Guid, interface: *?*anyopaque, agent_handle: ?Handle, controller_handle: ?Handle, attributes: OpenProtocolAttributes) callconv(.C) Status,
/// Closes a protocol on a handle that was opened using openProtocol().
- closeProtocol: fn (Handle, *align(8) const Guid, Handle, ?Handle) callconv(.C) Status,
+ closeProtocol: fn (handle: Handle, protocol: *align(8) const Guid, agentHandle: Handle, controller_handle: ?Handle) callconv(.C) Status,
/// Retrieves the list of agents that currently have a protocol interface opened.
- openProtocolInformation: fn (Handle, *align(8) const Guid, *[*]ProtocolInformationEntry, *usize) callconv(.C) Status,
+ openProtocolInformation: fn (handle: Handle, protocol: *align(8) const Guid, entry_buffer: *[*]ProtocolInformationEntry, entry_count: *usize) callconv(.C) Status,
/// Retrieves the list of protocol interface GUIDs that are installed on a handle in a buffer allocated from pool.
- protocolsPerHandle: fn (Handle, *[*]*align(8) const Guid, *usize) callconv(.C) Status,
+ protocolsPerHandle: fn (handle: Handle, protocol_buffer: *[*]*align(8) const Guid, protocol_buffer_count: *usize) callconv(.C) Status,
/// Returns an array of handles that support the requested protocol in a buffer allocated from pool.
- locateHandleBuffer: fn (LocateSearchType, ?*align(8) const Guid, ?*const anyopaque, *usize, *[*]Handle) callconv(.C) Status,
+ locateHandleBuffer: fn (search_type: LocateSearchType, protocol: ?*align(8) const Guid, search_key: ?*const anyopaque, num_handles: *usize, buffer: *[*]Handle) callconv(.C) Status,
/// Returns the first protocol instance that matches the given protocol.
- locateProtocol: fn (*align(8) const Guid, ?*const anyopaque, *?*anyopaque) callconv(.C) Status,
+ locateProtocol: fn (protocol: *align(8) const Guid, registration: ?*const anyopaque, interface: *?*anyopaque) callconv(.C) Status,
/// Installs one or more protocol interfaces into the boot services environment
- installMultipleProtocolInterfaces: fn (*Handle, ...) callconv(.C) Status,
+ installMultipleProtocolInterfaces: fn (handle: *Handle, ...) callconv(.C) Status,
/// Removes one or more protocol interfaces into the boot services environment
- uninstallMultipleProtocolInterfaces: fn (*Handle, ...) callconv(.C) Status,
+ uninstallMultipleProtocolInterfaces: fn (handle: *Handle, ...) callconv(.C) Status,
/// Computes and returns a 32-bit CRC for a data buffer.
- calculateCrc32: fn ([*]const u8, usize, *u32) callconv(.C) Status,
+ calculateCrc32: fn (data: [*]const u8, data_size: usize, *u32) callconv(.C) Status,
/// Copies the contents of one buffer to another buffer
- copyMem: fn ([*]u8, [*]const u8, usize) callconv(.C) void,
+ copyMem: fn (dest: [*]u8, src: [*]const u8, len: usize) callconv(.C) void,
/// Fills a buffer with a specified value
- setMem: fn ([*]u8, usize, u8) callconv(.C) void,
+ setMem: fn (buffer: [*]u8, size: usize, value: u8) callconv(.C) void,
/// Creates an event in a group.
- createEventEx: fn (u32, usize, EfiEventNotify, *const anyopaque, *align(8) const Guid, *Event) callconv(.C) Status,
+ createEventEx: fn (type: u32, notify_tpl: usize, notify_func: EfiEventNotify, notify_ctx: *const anyopaque, event_group: *align(8) const Guid, event: *Event) callconv(.C) Status,
pub const signature: u64 = 0x56524553544f4f42;
From ae084c5f59b0436a8fcb1b924008a16d90a9fe86 Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Thu, 13 Jan 2022 15:44:29 -0500
Subject: [PATCH 0006/2031] std/os/uefi: Complete AcpiDevicePath and
HardwareDevicePaths
---
.../uefi/protocols/device_path_protocol.zig | 61 +++++++++++++++----
1 file changed, 49 insertions(+), 12 deletions(-)
diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig
index df3812451c..7a31b923ac 100644
--- a/lib/std/os/uefi/protocols/device_path_protocol.zig
+++ b/lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -87,7 +87,10 @@ pub const DevicePathProtocol = packed struct {
},
.Acpi => blk: {
const acpi: ?AcpiDevicePath = switch (@intToEnum(AcpiDevicePath.Subtype, self.subtype)) {
- else => null, // TODO
+ .Acpi => .{ .Acpi = @ptrCast(*const AcpiDevicePath.BaseAcpiDevicePath, self) },
+ .ExpandedAcpi => .{ .ExpandedAcpi = @ptrCast(*const AcpiDevicePath.ExpandedAcpiDevicePath, self) },
+ .Adr => .{ .Adr = @ptrCast(*const AcpiDevicePath.AdrDevicePath, self) },
+ _ => null,
};
break :blk if (acpi) |a| .{ .Acpi = a } else null;
},
@@ -173,58 +176,92 @@ pub const HardwareDevicePath = union(Subtype) {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ function: u8,
+ device: u8,
};
pub const PcCardDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ function_number: u8,
};
pub const MemoryMappedDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ memory_type: u32,
+ start_address: u64,
+ end_address: u64,
};
pub const VendorDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ vendor_guid: Guid,
};
pub const ControllerDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ controller_number: u32,
};
pub const BmcDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ interface_type: u8,
+ base_address: usize,
};
};
pub const AcpiDevicePath = union(Subtype) {
- Acpi: void, // TODO
- ExpandedAcpi: void, // TODO
- Adr: void, // TODO
- Nvdimm: void, // TODO
+ Acpi: *const BaseAcpiDevicePath,
+ ExpandedAcpi: *const ExpandedAcpiDevicePath,
+ Adr: *const AdrDevicePath,
pub const Subtype = enum(u8) {
Acpi = 1,
ExpandedAcpi = 2,
Adr = 3,
- Nvdimm = 4,
_,
};
+
+ pub const BaseAcpiDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ hid: u32,
+ uid: u32,
+ };
+
+ pub const ExpandedAcpiDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ hid: u32,
+ uid: u32,
+ cid: u32,
+ // variable length u16[*:0] strings
+ // hid_str, uid_str, cid_str
+ };
+
+ pub const AdrDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ adr: u32,
+ // multiple adr entries can optionally follow
+ pub fn adrs(self: *const AdrDevicePath) []const u32 {
+ // self.length is a minimum of 8 with one adr which is size 4.
+ var entries = (self.length - 4) / @sizeOf(u32);
+ return @ptrCast([*]const u32, &self.adr)[0..entries];
+ }
+ };
};
pub const MessagingDevicePath = union(Subtype) {
From a2a2601da577dd424ea585da0449288a439871fe Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Fri, 14 Jan 2022 07:02:34 -0500
Subject: [PATCH 0007/2031] std/os/uefi: Complete DevicePathProtocol types
---
lib/std/os/uefi.zig | 12 +
.../uefi/protocols/device_path_protocol.zig | 294 ++++++++++++++++--
2 files changed, 283 insertions(+), 23 deletions(-)
diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig
index 4c034ed456..1bceab5b2a 100644
--- a/lib/std/os/uefi.zig
+++ b/lib/std/os/uefi.zig
@@ -23,6 +23,18 @@ pub var system_table: *tables.SystemTable = undefined;
/// A handle to an event structure.
pub const Event = *opaque {};
+pub const MacAddress = extern struct {
+ address: [32]u8,
+};
+
+pub const Ipv4Address = extern struct {
+ address: [4]u8,
+};
+
+pub const Ipv6Address = extern struct {
+ address: [16]u8,
+};
+
/// GUIDs must be align(8)
pub const Guid = extern struct {
time_low: u32,
diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig
index 7a31b923ac..a6509bb14f 100644
--- a/lib/std/os/uefi/protocols/device_path_protocol.zig
+++ b/lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -265,24 +265,24 @@ pub const AcpiDevicePath = union(Subtype) {
};
pub const MessagingDevicePath = union(Subtype) {
- Atapi: void, // TODO
- Scsi: void, // TODO
- FibreChannel: void, // TODO
- FibreChannelEx: void, // TODO
- @"1394": void, // TODO
- Usb: void, // TODO
- Sata: void, // TODO
- UsbWwid: void, // TODO
- Lun: void, // TODO
- UsbClass: void, // TODO
- I2o: void, // TODO
- MacAddress: void, // TODO
- Ipv4: void, // TODO
- Ipv6: void, // TODO
- Vlan: void, // TODO
- InfiniBand: void, // TODO
- Uart: void, // TODO
- Vendor: void, // TODO
+ Atapi: *const AtapiDevicePath,
+ Scsi: *const ScsiDevicePath,
+ FibreChannel: *const FibreChannelDevicePath,
+ FibreChannelEx: FibreChannelExDevicePath,
+ @"1394": *const F1394DevicePath,
+ Usb: *const UsbDevicePath,
+ Sata: *const SataDevicePath,
+ UsbWwid: *const UsbWwidDevicePath,
+ Lun: *const DeviceLogicalUnitDevicePath,
+ UsbClass: *const UsbClassDevicePath,
+ I2o: *const I2oDevicePath,
+ MacAddress: *const MacAddressDevicePath,
+ Ipv4: *const Ipv4DevicePath,
+ Ipv6: *const Ipv6DevicePath,
+ Vlan: *const VlanDevicePath,
+ InfiniBand: *const InfiniBandDevicePath,
+ Uart: *const UartDevicePath,
+ Vendor: *const VendorDefinedDevicePath,
pub const Subtype = enum(u8) {
Atapi = 1,
@@ -305,6 +305,232 @@ pub const MessagingDevicePath = union(Subtype) {
Vendor = 10,
_,
};
+
+ pub const AtapiDevicePath = packed struct {
+ const Role = enum(u8) {
+ Master = 0,
+ Slave = 1,
+ };
+
+ const Rank = enum(u8) {
+ Primary = 0,
+ Secondary = 1,
+ };
+
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ primary_secondary: Rank,
+ slave_master: Role,
+ logical_unit_number: u16,
+ };
+
+ pub const ScsiDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ target_id: u16,
+ logical_unit_number: u16,
+ };
+
+ pub const FibreChannelDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ reserved: u32,
+ world_wide_name: u64,
+ logical_unit_number: u64,
+ };
+
+ pub const FibreChannelExDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ reserved: u32,
+ world_wide_name: [8]u8,
+ logical_unit_number: [8]u8,
+ };
+
+ pub const F1394DevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ reserved: u32,
+ guid: u64,
+ };
+
+ pub const UsbDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ parent_port_number: u8,
+ interface_number: u8,
+ };
+
+ pub const SataDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ hba_port_number: u16,
+ port_multiplier_port_number: u16,
+ logical_unit_number: u16,
+ };
+
+ pub const UsbWwidDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ interface_number: u16,
+ device_vendor_id: u16,
+ device_product_id: u16,
+
+ pub fn serial_number(self: *const UsbWwidDevicePath) []const u16 {
+ var serial_len = (self.length - @sizeOf(UsbWwidDevicePath)) / @sizeOf(u16);
+ return @ptrCast([*]u16, @ptrCast([*]u8, self) + @sizeOf(UsbWwidDevicePath))[0..serial_len];
+ }
+ };
+
+ pub const DeviceLogicalUnitDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ lun: u8,
+ };
+
+ pub const UsbClassDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ vendor_id: u16,
+ product_id: u16,
+ device_class: u8,
+ device_subclass: u8,
+ device_protocol: u8,
+ };
+
+ pub const I2oDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ tid: u32,
+ };
+
+ pub const MacAddressDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ mac_address: uefi.MacAddress,
+ if_type: u8,
+ };
+
+ pub const Ipv4DevicePath = packed struct {
+ pub const IpType = enum(u8) {
+ Dhcp = 0,
+ Static = 1,
+ };
+
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ local_ip_address: uefi.Ipv4Address,
+ remote_ip_address: uefi.Ipv4Address,
+ local_port: u16,
+ remote_port: u16,
+ network_protocol: u16,
+ static_ip_address: IpType,
+ gateway_ip_address: u32,
+ subnet_mask: u32,
+ };
+
+ pub const Ipv6DevicePath = packed struct {
+ pub const Origin = enum(u8) {
+ Manual = 0,
+ AssignedStateless = 1,
+ AssignedStateful = 2,
+ };
+
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ local_ip_address: uefi.Ipv6Address,
+ remote_ip_address: uefi.Ipv6Address,
+ local_port: u16,
+ remote_port: u16,
+ protocol: u16,
+ ip_address_origin: Origin,
+ prefix_length: u8,
+ gateway_ip_address: uefi.Ipv6Address,
+ };
+
+ pub const VlanDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ vlan_id: u16,
+ };
+
+ pub const InfiniBandDevicePath = packed struct {
+ pub const ResourceFlags = packed struct {
+ pub const ControllerType = enum(u1) {
+ Ioc = 0,
+ Service = 1,
+ };
+
+ ioc_or_service: ControllerType,
+ extend_boot_environment: bool,
+ console_protocol: bool,
+ storage_protocol: bool,
+ network_protocol: bool,
+
+ // u1 + 4 * bool = 5 bits, we need a total of 32 bits
+ reserved: u27,
+ };
+
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ resource_flags: ResourceFlags,
+ port_gid: [16]u8,
+ service_id: u64,
+ target_port_id: u64,
+ device_id: u64,
+ };
+
+ pub const UartDevicePath = packed struct {
+ pub const Parity = enum(u8) {
+ Default = 0,
+ None = 1,
+ Even = 2,
+ Odd = 3,
+ Mark = 4,
+ Space = 5,
+ _,
+ };
+
+ pub const StopBits = enum(u8) {
+ Default = 0,
+ One = 1,
+ OneAndAHalf = 2,
+ Two = 3,
+ _,
+ };
+
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ reserved: u16,
+ baud_rate: u32,
+ data_bits: u8,
+ parity: Parity,
+ stop_bits: StopBits,
+ };
+
+ pub const VendorDefinedDevicePath = packed struct {
+ type: DevicePathType,
+ subtype: Subtype,
+ length: u16,
+ vendor_guid: Guid,
+ };
};
pub const MediaDevicePath = union(Subtype) {
@@ -332,24 +558,44 @@ pub const MediaDevicePath = union(Subtype) {
};
pub const HardDriveDevicePath = packed struct {
+ pub const Format = enum(u8) {
+ LegacyMbr = 0x01,
+ GuidPartitionTable = 0x02,
+ };
+
+ pub const SignatureType = enum(u8) {
+ NoSignature = 0x00,
+ /// "32-bit signature from address 0x1b8 of the type 0x01 MBR"
+ MbrSignature = 0x01,
+ GuidSignature = 0x02,
+ };
+
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ partition_number: u32,
+ partition_start: u64,
+ partition_size: u64,
+ partition_signature: [16]u8,
+ partition_format: Format,
+ signature_type: SignatureType,
};
pub const CdromDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ boot_entry: u32,
+ partition_start: u64,
+ partition_size: u64,
};
pub const VendorDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ guid: Guid,
+ // vendor-defined variable data
};
pub const FilePathDevicePath = packed struct {
@@ -366,19 +612,21 @@ pub const MediaDevicePath = union(Subtype) {
type: DevicePathType,
subtype: Subtype,
length: u16,
- // TODO
+ guid: Guid,
};
pub const PiwgFirmwareFileDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
+ fv_filename: Guid,
};
pub const PiwgFirmwareVolumeDevicePath = packed struct {
type: DevicePathType,
subtype: Subtype,
length: u16,
+ fv_name: Guid,
};
pub const RelativeOffsetRangeDevicePath = packed struct {
@@ -396,7 +644,7 @@ pub const MediaDevicePath = union(Subtype) {
length: u16,
start: u64,
end: u64,
- disk_type: uefi.Guid,
+ disk_type: Guid,
instance: u16,
};
};
From dab4c63684ca951049fb8e8f6b2415857eb6652b Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Fri, 14 Jan 2022 08:53:56 -0500
Subject: [PATCH 0008/2031] std/os/uefi: Refactor getDevicePath()
Uses comptime loops over the types instead of writing out a large
switch.
---
.../uefi/protocols/device_path_protocol.zig | 95 +++++++------------
1 file changed, 34 insertions(+), 61 deletions(-)
diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig
index a6509bb14f..ae5822ba35 100644
--- a/lib/std/os/uefi/protocols/device_path_protocol.zig
+++ b/lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -72,66 +72,39 @@ pub const DevicePathProtocol = packed struct {
}
pub fn getDevicePath(self: *const DevicePathProtocol) ?DevicePath {
- return switch (self.type) {
- .Hardware => blk: {
- const hardware: ?HardwareDevicePath = switch (@intToEnum(HardwareDevicePath.Subtype, self.subtype)) {
- .Pci => .{ .Pci = @ptrCast(*const HardwareDevicePath.PciDevicePath, self) },
- .PcCard => .{ .PcCard = @ptrCast(*const HardwareDevicePath.PcCardDevicePath, self) },
- .MemoryMapped => .{ .MemoryMapped = @ptrCast(*const HardwareDevicePath.MemoryMappedDevicePath, self) },
- .Vendor => .{ .Vendor = @ptrCast(*const HardwareDevicePath.VendorDevicePath, self) },
- .Controller => .{ .Controller = @ptrCast(*const HardwareDevicePath.ControllerDevicePath, self) },
- .Bmc => .{ .Bmc = @ptrCast(*const HardwareDevicePath.BmcDevicePath, self) },
- _ => null,
- };
- break :blk if (hardware) |h| .{ .Hardware = h } else null;
- },
- .Acpi => blk: {
- const acpi: ?AcpiDevicePath = switch (@intToEnum(AcpiDevicePath.Subtype, self.subtype)) {
- .Acpi => .{ .Acpi = @ptrCast(*const AcpiDevicePath.BaseAcpiDevicePath, self) },
- .ExpandedAcpi => .{ .ExpandedAcpi = @ptrCast(*const AcpiDevicePath.ExpandedAcpiDevicePath, self) },
- .Adr => .{ .Adr = @ptrCast(*const AcpiDevicePath.AdrDevicePath, self) },
- _ => null,
- };
- break :blk if (acpi) |a| .{ .Acpi = a } else null;
- },
- .Messaging => blk: {
- const messaging: ?MessagingDevicePath = switch (@intToEnum(MessagingDevicePath.Subtype, self.subtype)) {
- else => null, // TODO
- };
- break :blk if (messaging) |m| .{ .Messaging = m } else null;
- },
- .Media => blk: {
- const media: ?MediaDevicePath = switch (@intToEnum(MediaDevicePath.Subtype, self.subtype)) {
- .HardDrive => .{ .HardDrive = @ptrCast(*const MediaDevicePath.HardDriveDevicePath, self) },
- .Cdrom => .{ .Cdrom = @ptrCast(*const MediaDevicePath.CdromDevicePath, self) },
- .Vendor => .{ .Vendor = @ptrCast(*const MediaDevicePath.VendorDevicePath, self) },
- .FilePath => .{ .FilePath = @ptrCast(*const MediaDevicePath.FilePathDevicePath, self) },
- .MediaProtocol => .{ .MediaProtocol = @ptrCast(*const MediaDevicePath.MediaProtocolDevicePath, self) },
- .PiwgFirmwareFile => .{ .PiwgFirmwareFile = @ptrCast(*const MediaDevicePath.PiwgFirmwareFileDevicePath, self) },
- .PiwgFirmwareVolume => .{ .PiwgFirmwareVolume = @ptrCast(*const MediaDevicePath.PiwgFirmwareVolumeDevicePath, self) },
- .RelativeOffsetRange => .{ .RelativeOffsetRange = @ptrCast(*const MediaDevicePath.RelativeOffsetRangeDevicePath, self) },
- .RamDisk => .{ .RamDisk = @ptrCast(*const MediaDevicePath.RamDiskDevicePath, self) },
- _ => null,
- };
- break :blk if (media) |m| .{ .Media = m } else null;
- },
- .BiosBootSpecification => blk: {
- const bbs: ?BiosBootSpecificationDevicePath = switch (@intToEnum(BiosBootSpecificationDevicePath.Subtype, self.subtype)) {
- .BBS101 => .{ .BBS101 = @ptrCast(*const BiosBootSpecificationDevicePath.BBS101DevicePath, self) },
- _ => null,
- };
- break :blk if (bbs) |b| .{ .BiosBootSpecification = b } else null;
- },
- .End => blk: {
- const end: ?EndDevicePath = switch (@intToEnum(EndDevicePath.Subtype, self.subtype)) {
- .EndEntire => .{ .EndEntire = @ptrCast(*const EndDevicePath.EndEntireDevicePath, self) },
- .EndThisInstance => .{ .EndThisInstance = @ptrCast(*const EndDevicePath.EndThisInstanceDevicePath, self) },
- _ => null,
- };
- break :blk if (end) |e| .{ .End = e } else null;
- },
- _ => null,
- };
+ inline for (@typeInfo(DevicePath).Union.fields) |ufield| {
+ const enum_value = std.meta.stringToEnum(DevicePathType, ufield.name);
+
+ // Got the associated union type for self.type, now
+ // we need to initialize it and its subtype
+ if (self.type == enum_value) {
+ var subtype = self.initSubtype(ufield.field_type);
+
+ if (subtype) |sb| {
+ // e.g. return .{ .Hardware = .{ .Pci = @ptrCast(...) } }
+ return @unionInit(DevicePath, ufield.name, sb);
+ }
+ }
+ }
+
+ return null;
+ }
+
+ pub fn initSubtype(self: *const DevicePathProtocol, comptime TUnion: type) ?TUnion {
+ const type_info = @typeInfo(TUnion).Union;
+ const TTag = type_info.tag_type.?;
+
+ inline for (type_info.fields) |subtype| {
+ // The tag names match the union names, so just grab that off the enum
+ const tag_val: u8 = @enumToInt(@field(TTag, subtype.name));
+
+ if (self.subtype == tag_val) {
+ // e.g. expr = .{ .Pci = @ptrCast(...) }
+ return @unionInit(TUnion, subtype.name, @ptrCast(subtype.field_type, self));
+ }
+ }
+
+ return null;
}
};
@@ -268,7 +241,7 @@ pub const MessagingDevicePath = union(Subtype) {
Atapi: *const AtapiDevicePath,
Scsi: *const ScsiDevicePath,
FibreChannel: *const FibreChannelDevicePath,
- FibreChannelEx: FibreChannelExDevicePath,
+ FibreChannelEx: *const FibreChannelExDevicePath,
@"1394": *const F1394DevicePath,
Usb: *const UsbDevicePath,
Sata: *const SataDevicePath,
From 3c2eddae5a4e89ec428a7b3b9f089c32f57b17af Mon Sep 17 00:00:00 2001
From: viri
Date: Sat, 15 Jan 2022 17:27:17 -0600
Subject: [PATCH 0009/2031] std.os.windows: fix casing for `ntdll.lib`
I've seen having this be wrong break some cross-compilers, and it's
also how it is in other files so it's best to be consistent.
It's also just the actual casing of the file.
---
lib/std/os/windows/ntdll.zig | 44 ++++++++++++++++++------------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig
index 41c1a905ec..1154558d4d 100644
--- a/lib/std/os/windows/ntdll.zig
+++ b/lib/std/os/windows/ntdll.zig
@@ -23,23 +23,23 @@ const FILE_BASIC_INFORMATION = windows.FILE_BASIC_INFORMATION;
const SIZE_T = windows.SIZE_T;
const CURDIR = windows.CURDIR;
-pub extern "NtDll" fn RtlGetVersion(
+pub extern "ntdll" fn RtlGetVersion(
lpVersionInformation: *RTL_OSVERSIONINFOW,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn RtlCaptureStackBackTrace(
+pub extern "ntdll" fn RtlCaptureStackBackTrace(
FramesToSkip: DWORD,
FramesToCapture: DWORD,
BackTrace: **anyopaque,
BackTraceHash: ?*DWORD,
) callconv(WINAPI) WORD;
-pub extern "NtDll" fn NtQueryInformationFile(
+pub extern "ntdll" fn NtQueryInformationFile(
FileHandle: HANDLE,
IoStatusBlock: *IO_STATUS_BLOCK,
FileInformation: *anyopaque,
Length: ULONG,
FileInformationClass: FILE_INFORMATION_CLASS,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtSetInformationFile(
+pub extern "ntdll" fn NtSetInformationFile(
FileHandle: HANDLE,
IoStatusBlock: *IO_STATUS_BLOCK,
FileInformation: PVOID,
@@ -47,12 +47,12 @@ pub extern "NtDll" fn NtSetInformationFile(
FileInformationClass: FILE_INFORMATION_CLASS,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtQueryAttributesFile(
+pub extern "ntdll" fn NtQueryAttributesFile(
ObjectAttributes: *OBJECT_ATTRIBUTES,
FileAttributes: *FILE_BASIC_INFORMATION,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtCreateFile(
+pub extern "ntdll" fn NtCreateFile(
FileHandle: *HANDLE,
DesiredAccess: ACCESS_MASK,
ObjectAttributes: *OBJECT_ATTRIBUTES,
@@ -65,7 +65,7 @@ pub extern "NtDll" fn NtCreateFile(
EaBuffer: ?*anyopaque,
EaLength: ULONG,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtDeviceIoControlFile(
+pub extern "ntdll" fn NtDeviceIoControlFile(
FileHandle: HANDLE,
Event: ?HANDLE,
ApcRoutine: ?IO_APC_ROUTINE,
@@ -77,7 +77,7 @@ pub extern "NtDll" fn NtDeviceIoControlFile(
OutputBuffer: ?PVOID,
OutputBufferLength: ULONG,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtFsControlFile(
+pub extern "ntdll" fn NtFsControlFile(
FileHandle: HANDLE,
Event: ?HANDLE,
ApcRoutine: ?IO_APC_ROUTINE,
@@ -89,16 +89,16 @@ pub extern "NtDll" fn NtFsControlFile(
OutputBuffer: ?PVOID,
OutputBufferLength: ULONG,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtClose(Handle: HANDLE) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn RtlDosPathNameToNtPathName_U(
+pub extern "ntdll" fn NtClose(Handle: HANDLE) callconv(WINAPI) NTSTATUS;
+pub extern "ntdll" fn RtlDosPathNameToNtPathName_U(
DosPathName: [*:0]const u16,
NtPathName: *UNICODE_STRING,
NtFileNamePart: ?*?[*:0]const u16,
DirectoryInfo: ?*CURDIR,
) callconv(WINAPI) BOOL;
-pub extern "NtDll" fn RtlFreeUnicodeString(UnicodeString: *UNICODE_STRING) callconv(WINAPI) void;
+pub extern "ntdll" fn RtlFreeUnicodeString(UnicodeString: *UNICODE_STRING) callconv(WINAPI) void;
-pub extern "NtDll" fn NtQueryDirectoryFile(
+pub extern "ntdll" fn NtQueryDirectoryFile(
FileHandle: HANDLE,
Event: ?HANDLE,
ApcRoutine: ?IO_APC_ROUTINE,
@@ -112,30 +112,30 @@ pub extern "NtDll" fn NtQueryDirectoryFile(
RestartScan: BOOLEAN,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtCreateKeyedEvent(
+pub extern "ntdll" fn NtCreateKeyedEvent(
KeyedEventHandle: *HANDLE,
DesiredAccess: ACCESS_MASK,
ObjectAttributes: ?PVOID,
Flags: ULONG,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtReleaseKeyedEvent(
+pub extern "ntdll" fn NtReleaseKeyedEvent(
EventHandle: ?HANDLE,
Key: ?*const anyopaque,
Alertable: BOOLEAN,
Timeout: ?*const LARGE_INTEGER,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtWaitForKeyedEvent(
+pub extern "ntdll" fn NtWaitForKeyedEvent(
EventHandle: ?HANDLE,
Key: ?*const anyopaque,
Alertable: BOOLEAN,
Timeout: ?*const LARGE_INTEGER,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn RtlSetCurrentDirectory_U(PathName: *UNICODE_STRING) callconv(WINAPI) NTSTATUS;
+pub extern "ntdll" fn RtlSetCurrentDirectory_U(PathName: *UNICODE_STRING) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtQueryObject(
+pub extern "ntdll" fn NtQueryObject(
Handle: HANDLE,
ObjectInformationClass: OBJECT_INFORMATION_CLASS,
ObjectInformation: PVOID,
@@ -143,22 +143,22 @@ pub extern "NtDll" fn NtQueryObject(
ReturnLength: ?*ULONG,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn RtlWakeAddressAll(
+pub extern "ntdll" fn RtlWakeAddressAll(
Address: ?*const anyopaque,
) callconv(WINAPI) void;
-pub extern "NtDll" fn RtlWakeAddressSingle(
+pub extern "ntdll" fn RtlWakeAddressSingle(
Address: ?*const anyopaque,
) callconv(WINAPI) void;
-pub extern "NtDll" fn RtlWaitOnAddress(
+pub extern "ntdll" fn RtlWaitOnAddress(
Address: ?*const anyopaque,
CompareAddress: ?*const anyopaque,
AddressSize: SIZE_T,
Timeout: ?*const LARGE_INTEGER,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtLockFile(
+pub extern "ntdll" fn NtLockFile(
FileHandle: HANDLE,
Event: ?HANDLE,
ApcRoutine: ?*IO_APC_ROUTINE,
@@ -171,7 +171,7 @@ pub extern "NtDll" fn NtLockFile(
ExclusiveLock: BOOLEAN,
) callconv(WINAPI) NTSTATUS;
-pub extern "NtDll" fn NtUnlockFile(
+pub extern "ntdll" fn NtUnlockFile(
FileHandle: HANDLE,
IoStatusBlock: *IO_STATUS_BLOCK,
ByteOffset: *const LARGE_INTEGER,
From 12d6bcec029dbb11bd533d0d0f0cc723aa71bc9d Mon Sep 17 00:00:00 2001
From: viri
Date: Sat, 15 Jan 2022 17:35:30 -0600
Subject: [PATCH 0010/2031] std.os.windows: add ntdll thread information APIs
---
lib/std/os/windows/ntdll.zig | 65 ++++++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig
index 1154558d4d..26cc19935f 100644
--- a/lib/std/os/windows/ntdll.zig
+++ b/lib/std/os/windows/ntdll.zig
@@ -23,6 +23,71 @@ const FILE_BASIC_INFORMATION = windows.FILE_BASIC_INFORMATION;
const SIZE_T = windows.SIZE_T;
const CURDIR = windows.CURDIR;
+pub const THREADINFOCLASS = enum(c_int) {
+ ThreadBasicInformation,
+ ThreadTimes,
+ ThreadPriority,
+ ThreadBasePriority,
+ ThreadAffinityMask,
+ ThreadImpersonationToken,
+ ThreadDescriptorTableEntry,
+ ThreadEnableAlignmentFaultFixup,
+ ThreadEventPair_Reusable,
+ ThreadQuerySetWin32StartAddress,
+ ThreadZeroTlsCell,
+ ThreadPerformanceCount,
+ ThreadAmILastThread,
+ ThreadIdealProcessor,
+ ThreadPriorityBoost,
+ ThreadSetTlsArrayAddress,
+ ThreadIsIoPending,
+ // Windows 2000+ from here
+ ThreadHideFromDebugger,
+ // Windows XP+ from here
+ ThreadBreakOnTermination,
+ ThreadSwitchLegacyState,
+ ThreadIsTerminated,
+ // Windows Vista+ from here
+ ThreadLastSystemCall,
+ ThreadIoPriority,
+ ThreadCycleTime,
+ ThreadPagePriority,
+ ThreadActualBasePriority,
+ ThreadTebInformation,
+ ThreadCSwitchMon,
+ // Windows 7+ from here
+ ThreadCSwitchPmu,
+ ThreadWow64Context,
+ ThreadGroupInformation,
+ ThreadUmsInformation,
+ ThreadCounterProfiling,
+ ThreadIdealProcessorEx,
+ // Windows 8+ from here
+ ThreadCpuAccountingInformation,
+ // Windows 8.1+ from here
+ ThreadSuspendCount,
+ // Windows 10+ from here
+ ThreadHeterogeneousCpuPolicy,
+ ThreadContainerId,
+ ThreadNameInformation,
+ ThreadSelectedCpuSets,
+ ThreadSystemThreadInformation,
+ ThreadActualGroupAffinity,
+};
+pub extern "ntdll" fn NtQueryInformationThread(
+ ThreadHandle: HANDLE,
+ ThreadInformationClass: THREADINFOCLASS,
+ ThreadInformation: *anyopaque,
+ ThreadInformationLength: ULONG,
+ ReturnLength: ?*ULONG,
+) callconv(WINAPI) NTSTATUS;
+pub extern "ntdll" fn NtSetInformationThread(
+ ThreadHandle: HANDLE,
+ ThreadInformationClass: THREADINFOCLASS,
+ ThreadInformation: *const anyopaque,
+ ThreadInformationLength: ULONG,
+) callconv(WINAPI) NTSTATUS;
+
pub extern "ntdll" fn RtlGetVersion(
lpVersionInformation: *RTL_OSVERSIONINFOW,
) callconv(WINAPI) NTSTATUS;
From 4771ac298b0c692750524cb7e94eaf0b4343ce2b Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Sun, 16 Jan 2022 01:44:15 -0500
Subject: [PATCH 0011/2031] std/os/uefi: Simplify packed struct padding and
default zero-initialize
Beyond adding default zero-initialization, this commit changes undefined
initialization to zero, as some cases reserved the padding and on other
cases, I've found some systems act strange when giving uninit instead of
zero even when it shouldn't be an issue, one example being
FileProtocol.Open's attributes, which *should* be ignored when not
creating a file, but ended up giving an unrelated error.
---
lib/std/os/uefi.zig | 2 --
lib/std/os/uefi/protocols/absolute_pointer_protocol.zig | 8 ++------
lib/std/os/uefi/protocols/edid_override_protocol.zig | 4 +---
lib/std/os/uefi/protocols/hii.zig | 4 ++--
lib/std/os/uefi/protocols/simple_network_protocol.zig | 8 ++------
.../os/uefi/protocols/simple_text_input_ex_protocol.zig | 4 ++--
lib/std/os/uefi/tables/boot_services.zig | 4 +---
7 files changed, 10 insertions(+), 24 deletions(-)
diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig
index 1bceab5b2a..4644130e7f 100644
--- a/lib/std/os/uefi.zig
+++ b/lib/std/os/uefi.zig
@@ -98,7 +98,6 @@ pub const Time = extern struct {
/// 0 - 59
second: u8,
- _pad1: u8,
/// 0 - 999999999
nanosecond: u32,
@@ -115,7 +114,6 @@ pub const Time = extern struct {
/// If true, the time is affected by daylight savings time.
adjust_daylight: bool,
},
- _pad2: u8,
/// Time is to be interpreted as local time
pub const unspecified_timezone: i16 = 0x7ff;
diff --git a/lib/std/os/uefi/protocols/absolute_pointer_protocol.zig b/lib/std/os/uefi/protocols/absolute_pointer_protocol.zig
index ee79569233..11a5008d9a 100644
--- a/lib/std/os/uefi/protocols/absolute_pointer_protocol.zig
+++ b/lib/std/os/uefi/protocols/absolute_pointer_protocol.zig
@@ -40,9 +40,7 @@ pub const AbsolutePointerMode = extern struct {
attributes: packed struct {
supports_alt_active: bool,
supports_pressure_as_z: bool,
- _pad1: u6,
- _pad2: u8,
- _pad3: u16,
+ _pad: u30 = 0,
},
};
@@ -53,8 +51,6 @@ pub const AbsolutePointerState = extern struct {
active_buttons: packed struct {
touch_active: bool,
alt_active: bool,
- _pad1: u6,
- _pad2: u8,
- _pad3: u16,
+ _pad: u30 = 0,
},
};
diff --git a/lib/std/os/uefi/protocols/edid_override_protocol.zig b/lib/std/os/uefi/protocols/edid_override_protocol.zig
index 8bf848c59a..9540cdc5d4 100644
--- a/lib/std/os/uefi/protocols/edid_override_protocol.zig
+++ b/lib/std/os/uefi/protocols/edid_override_protocol.zig
@@ -26,7 +26,5 @@ pub const EdidOverrideProtocol = extern struct {
pub const EdidOverrideProtocolAttributes = packed struct {
dont_override: bool,
enable_hot_plug: bool,
- _pad1: u6,
- _pad2: u8,
- _pad3: u16,
+ _pad: u30 = 0,
};
diff --git a/lib/std/os/uefi/protocols/hii.zig b/lib/std/os/uefi/protocols/hii.zig
index 5e3c23d22a..b0b0418611 100644
--- a/lib/std/os/uefi/protocols/hii.zig
+++ b/lib/std/os/uefi/protocols/hii.zig
@@ -48,7 +48,7 @@ pub const NarrowGlyph = extern struct {
attributes: packed struct {
non_spacing: bool,
wide: bool,
- _pad: u6,
+ _pad: u6 = 0,
},
glyph_col_1: [19]u8,
};
@@ -62,7 +62,7 @@ pub const WideGlyph = extern struct {
},
glyph_col_1: [19]u8,
glyph_col_2: [19]u8,
- _pad: [3]u8,
+ _pad: [3]u8 = [_]u8{0} ** 3,
};
pub const HIIStringPackage = extern struct {
diff --git a/lib/std/os/uefi/protocols/simple_network_protocol.zig b/lib/std/os/uefi/protocols/simple_network_protocol.zig
index 01e5986c2d..283d944d34 100644
--- a/lib/std/os/uefi/protocols/simple_network_protocol.zig
+++ b/lib/std/os/uefi/protocols/simple_network_protocol.zig
@@ -126,9 +126,7 @@ pub const SimpleNetworkReceiveFilter = packed struct {
receive_broadcast: bool,
receive_promiscuous: bool,
receive_promiscuous_multicast: bool,
- _pad1: u3 = undefined,
- _pad2: u8 = undefined,
- _pad3: u16 = undefined,
+ _pad: u27 = 0,
};
pub const SimpleNetworkState = enum(u32) {
@@ -171,7 +169,5 @@ pub const SimpleNetworkInterruptStatus = packed struct {
transmit_interrupt: bool,
command_interrupt: bool,
software_interrupt: bool,
- _pad1: u4,
- _pad2: u8,
- _pad3: u16,
+ _pad: u28 = 0,
};
diff --git a/lib/std/os/uefi/protocols/simple_text_input_ex_protocol.zig b/lib/std/os/uefi/protocols/simple_text_input_ex_protocol.zig
index d816deb8cb..fbcebf1121 100644
--- a/lib/std/os/uefi/protocols/simple_text_input_ex_protocol.zig
+++ b/lib/std/os/uefi/protocols/simple_text_input_ex_protocol.zig
@@ -64,14 +64,14 @@ pub const KeyState = extern struct {
left_logo_pressed: bool,
menu_key_pressed: bool,
sys_req_pressed: bool,
- _pad1: u21,
+ _pad: u21 = 0,
shift_state_valid: bool,
},
key_toggle_state: packed struct {
scroll_lock_active: bool,
num_lock_active: bool,
caps_lock_active: bool,
- _pad1: u3,
+ _pad: u3 = 0,
key_state_exposed: bool,
toggle_state_valid: bool,
},
diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig
index 7da007b05d..468aaf4cf7 100644
--- a/lib/std/os/uefi/tables/boot_services.zig
+++ b/lib/std/os/uefi/tables/boot_services.zig
@@ -239,9 +239,7 @@ pub const OpenProtocolAttributes = packed struct {
by_child_controller: bool = false,
by_driver: bool = false,
exclusive: bool = false,
- _pad1: u2 = undefined,
- _pad2: u8 = undefined,
- _pad3: u16 = undefined,
+ _pad: u26 = 0,
};
pub const ProtocolInformationEntry = extern struct {
From d276a1189de15f85523a0e6844d0fd316306838a Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Sun, 16 Jan 2022 01:50:02 -0500
Subject: [PATCH 0012/2031] std/os/uefi: Align first field of
EdidOverrideProtocolAttributes to 4
This makes the struct align(4), which allows it to be passed as flags
more easily.
---
lib/std/os/uefi/protocols/edid_override_protocol.zig | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/lib/std/os/uefi/protocols/edid_override_protocol.zig b/lib/std/os/uefi/protocols/edid_override_protocol.zig
index 9540cdc5d4..800c269c67 100644
--- a/lib/std/os/uefi/protocols/edid_override_protocol.zig
+++ b/lib/std/os/uefi/protocols/edid_override_protocol.zig
@@ -8,7 +8,6 @@ pub const EdidOverrideProtocol = extern struct {
_get_edid: fn (*const EdidOverrideProtocol, Handle, *u32, *usize, *?[*]u8) callconv(.C) Status,
/// Returns policy information and potentially a replacement EDID for the specified video output device.
- /// attributes must be align(4)
pub fn getEdid(self: *const EdidOverrideProtocol, handle: Handle, attributes: *EdidOverrideProtocolAttributes, edid_size: *usize, edid: *?[*]u8) Status {
return self._get_edid(self, handle, attributes, edid_size, edid);
}
@@ -24,7 +23,7 @@ pub const EdidOverrideProtocol = extern struct {
};
pub const EdidOverrideProtocolAttributes = packed struct {
- dont_override: bool,
+ dont_override: bool align(4),
enable_hot_plug: bool,
_pad: u30 = 0,
};
From 628a7f85deb500624fb75f83014d3bc1a1c03cf4 Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Sun, 16 Jan 2022 02:35:22 -0500
Subject: [PATCH 0013/2031] std/os/uefi: Add conversion from Status to EfiError
Allows handling uefi function errors in a more zig-style way with try
and catch, using `try f().err()` when a `Status` is returned.
---
lib/std/os/uefi/status.zig | 69 ++++++++++++++++++++++++++++++++++++++
1 file changed, 69 insertions(+)
diff --git a/lib/std/os/uefi/status.zig b/lib/std/os/uefi/status.zig
index 09bc5030eb..e6f2024f7d 100644
--- a/lib/std/os/uefi/status.zig
+++ b/lib/std/os/uefi/status.zig
@@ -1,3 +1,5 @@
+const testing = @import("std").testing;
+
const high_bit = 1 << @typeInfo(usize).Int.bits - 1;
pub const Status = enum(usize) {
@@ -139,4 +141,71 @@ pub const Status = enum(usize) {
WarnResetRequired = 7,
_,
+
+ pub const EfiError = error{
+ LoadError,
+ InvalidParameter,
+ Unsupported,
+ BadBufferSize,
+ BufferTooSmall,
+ NotReady,
+ DeviceError,
+ WriteProtected,
+ OutOfResources,
+ VolumeCorrupted,
+ VolumeFull,
+ NoMedia,
+ MediaChanged,
+ NotFound,
+ AccessDenied,
+ NoResponse,
+ NoMapping,
+ Timeout,
+ NotStarted,
+ AlreadyStarted,
+ Aborted,
+ IcmpError,
+ TftpError,
+ ProtocolError,
+ IncompatibleVersion,
+ SecurityViolation,
+ CrcError,
+ EndOfMedia,
+ EndOfFile,
+ InvalidLanguage,
+ CompromisedData,
+ IpAddressConflict,
+ HttpError,
+ NetworkUnreachable,
+ HostUnreachable,
+ ProtocolUnreachable,
+ PortUnreachable,
+ ConnectionFin,
+ ConnectionReset,
+ ConnectionRefused,
+ WarnUnknownGlyph,
+ WarnDeleteFailure,
+ WarnWriteFailure,
+ WarnBufferTooSmall,
+ WarnStaleData,
+ WarnFileSystem,
+ WarnResetRequired,
+ };
+
+ pub fn err(self: Status) EfiError!void {
+ inline for (@typeInfo(EfiError).ErrorSet.?) |efi_err| {
+ if (self == @field(Status, efi_err.name)) {
+ return @field(EfiError, efi_err.name);
+ }
+ }
+ // self is .Success
+ }
};
+
+test "status" {
+ var st: Status = .DeviceError;
+ try testing.expectError(error.DeviceError, st.err());
+
+ st = .Success;
+ try st.err();
+}
From c727bd1bb68917f39f03f68765c5db7572069729 Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Sun, 16 Jan 2022 02:52:45 -0500
Subject: [PATCH 0014/2031] std/os/uefi: Fix parameter type mismatch in
edid_override_protocol
---
lib/std/os/uefi/protocols/edid_override_protocol.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/os/uefi/protocols/edid_override_protocol.zig b/lib/std/os/uefi/protocols/edid_override_protocol.zig
index 800c269c67..3eb39330f4 100644
--- a/lib/std/os/uefi/protocols/edid_override_protocol.zig
+++ b/lib/std/os/uefi/protocols/edid_override_protocol.zig
@@ -9,7 +9,7 @@ pub const EdidOverrideProtocol = extern struct {
/// Returns policy information and potentially a replacement EDID for the specified video output device.
pub fn getEdid(self: *const EdidOverrideProtocol, handle: Handle, attributes: *EdidOverrideProtocolAttributes, edid_size: *usize, edid: *?[*]u8) Status {
- return self._get_edid(self, handle, attributes, edid_size, edid);
+ return self._get_edid(self, handle, @ptrCast(*u32, attributes), edid_size, edid);
}
pub const guid align(8) = Guid{
From 03347114c3e5f6238d7871e878e1d2295bdf0260 Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Sun, 16 Jan 2022 03:04:37 -0500
Subject: [PATCH 0015/2031] std/os/uefi: Add util function for opening
protocols
---
lib/std/os/uefi/tables/boot_services.zig | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig
index 468aaf4cf7..ee1e72f094 100644
--- a/lib/std/os/uefi/tables/boot_services.zig
+++ b/lib/std/os/uefi/tables/boot_services.zig
@@ -154,6 +154,27 @@ pub const BootServices = extern struct {
/// Creates an event in a group.
createEventEx: fn (type: u32, notify_tpl: usize, notify_func: EfiEventNotify, notify_ctx: *const anyopaque, event_group: *align(8) const Guid, event: *Event) callconv(.C) Status,
+ /// Opens a protocol with a structure as the loaded image for a UEFI application
+ pub fn openProtocolSt(self: *BootServices, comptime protocol: type, handle: Handle) !*protocol {
+ if (!@hasDecl(protocol, "guid"))
+ @compileError("Protocol is missing guid!");
+
+ var ptr: ?*protocol = undefined;
+
+ try self.openProtocol(
+ handle,
+ &protocol.guid,
+ @ptrCast(*?*anyopaque, &ptr),
+ // Invoking handle (loaded image)
+ uefi.handle,
+ // Control handle (null as not a driver)
+ null,
+ uefi.tables.OpenProtocolAttributes{ .by_handle_protocol = true },
+ ).err();
+
+ return ptr.?;
+ }
+
pub const signature: u64 = 0x56524553544f4f42;
pub const event_timer: u32 = 0x80000000;
From ff6ece3811a1cb44a90986d543d1583a40629edf Mon Sep 17 00:00:00 2001
From: fifty-six
Date: Sun, 16 Jan 2022 03:50:50 -0500
Subject: [PATCH 0016/2031] std/os/uefi: Don't treat efi status warnings as
errors
---
lib/std/os/uefi/status.zig | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/lib/std/os/uefi/status.zig b/lib/std/os/uefi/status.zig
index e6f2024f7d..e975b92a15 100644
--- a/lib/std/os/uefi/status.zig
+++ b/lib/std/os/uefi/status.zig
@@ -183,13 +183,6 @@ pub const Status = enum(usize) {
ConnectionFin,
ConnectionReset,
ConnectionRefused,
- WarnUnknownGlyph,
- WarnDeleteFailure,
- WarnWriteFailure,
- WarnBufferTooSmall,
- WarnStaleData,
- WarnFileSystem,
- WarnResetRequired,
};
pub fn err(self: Status) EfiError!void {
@@ -198,7 +191,7 @@ pub const Status = enum(usize) {
return @field(EfiError, efi_err.name);
}
}
- // self is .Success
+ // self is .Success or Warning
}
};
From b7c7fba5b4f3cf80b36b75bacd401e12a25cf6f8 Mon Sep 17 00:00:00 2001
From: Stephen Gregoratto
Date: Sat, 22 Jan 2022 18:14:07 +1100
Subject: [PATCH 0017/2031] Add classic BPF library
This library contains:
- The global constants as used by C code.
- An Insn struct that can generate all the BPF instructions.
- A simple BPF virtual machine implementation that can be used for
testing programs. This has complete code-coverage and has been
extensively fuzzed.
---
lib/std/x.zig | 1 +
lib/std/x/net/bpf.zig | 1008 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 1009 insertions(+)
create mode 100644 lib/std/x/net/bpf.zig
diff --git a/lib/std/x.zig b/lib/std/x.zig
index be0ab25e7a..64caf324ed 100644
--- a/lib/std/x.zig
+++ b/lib/std/x.zig
@@ -9,6 +9,7 @@ pub const os = struct {
pub const net = struct {
pub const ip = @import("x/net/ip.zig");
pub const tcp = @import("x/net/tcp.zig");
+ pub const bpf = @import("x/net/bpf.zig");
};
test {
diff --git a/lib/std/x/net/bpf.zig b/lib/std/x/net/bpf.zig
new file mode 100644
index 0000000000..a99390ab62
--- /dev/null
+++ b/lib/std/x/net/bpf.zig
@@ -0,0 +1,1008 @@
+//! This package provides instrumentation for creating Berkeley Packet Filter[1]
+//! (BPF) programs, along with a simulator for running them.
+//!
+//! BPF is a mechanism for cheap, in-kernel packet filtering. Programs are
+//! attached to a network device and executed for every packet that flows
+//! through it. The program must then return a verdict: the amount of packet
+//! bytes that the kernel should copy into userspace. Execution speed is
+//! achieved by having programs run in a limited virtual machine, which has the
+//! added benefit of graceful failure in the face of buggy programs.
+//!
+//! The BPF virtual machine has a 32-bit word length and a small number of
+//! word-sized registers:
+//!
+//! - The accumulator, `a`: The source/destination of arithmetic and logic
+//! operations.
+//! - The index register, `x`: Used as an offset for indirect memory access and
+//! as a comparison value for conditional jumps.
+//! - The scratch memory store, `M[0]..M[15]`: Used for saving the value of a/x
+//! for later use.
+//!
+//! The packet being examined is an array of bytes, and is addressed using plain
+//! array subscript notation, e.g. [10] for the byte at offset 10. An implicit
+//! program counter, `pc`, is initialized to zero and incremented for each instruction.
+//!
+//! The machine has a fixed instruction set with the following form, where the
+//! numbers represent bit length:
+//!
+//! ```
+//! ┌───────────┬──────┬──────┐
+//! │ opcode:16 │ jt:8 │ jf:8 │
+//! ├───────────┴──────┴──────┤
+//! │ k:32 │
+//! └─────────────────────────┘
+//! ```
+//!
+//! The `opcode` indicates the instruction class and its addressing mode.
+//! Opcodes are generated by performing binary addition on the 8-bit class and
+//! mode constants. For example, the opcode for loading a byte from the packet
+//! at X + 2, (`ldb [x + 2]`), is:
+//!
+//! ```
+//! LD | IND | B = 0x00 | 0x40 | 0x20
+//! = 0x60
+//! ```
+//!
+//! `jt` is an offset used for conditional jumps, and increments the program
+//! counter by its amount if the comparison was true. Conversely, `jf`
+//! increments the counter if it was false. These fields are ignored in all
+//! other cases. `k` is a generic variable used for various purposes, most
+//! commonly as some sort of constant.
+//!
+//! This package contains opcode extensions used by different implementations,
+//! where "extension" is anything outside of the original that was imported into
+//! 4.4BSD[2]. These are marked with "EXTENSION", along with a list of
+//! implementations that use them.
+//!
+//! Most of the doc-comments use the BPF assembly syntax as described in the
+//! original paper[1]. For the sake of completeness, here is the complete
+//! instruction set, along with the extensions:
+//!
+//!```
+//! opcode addressing modes
+//! ld #k #len M[k] [k] [x + k]
+//! ldh [k] [x + k]
+//! ldb [k] [x + k]
+//! ldx #k #len M[k] 4 * ([k] & 0xf) arc4random()
+//! st M[k]
+//! stx M[k]
+//! jmp L
+//! jeq #k, Lt, Lf
+//! jgt #k, Lt, Lf
+//! jge #k, Lt, Lf
+//! jset #k, Lt, Lf
+//! add #k x
+//! sub #k x
+//! mul #k x
+//! div #k x
+//! or #k x
+//! and #k x
+//! lsh #k x
+//! rsh #k x
+//! neg #k x
+//! mod #k x
+//! xor #k x
+//! ret #k a
+//! tax
+//! txa
+//! ```
+//!
+//! Finally, a note on program design. The lack of backwards jumps leads to a
+//! "return early, return often" control flow. Take for example the program
+//! generated from the tcpdump filter `ip`:
+//!
+//! ```
+//! (000) ldh [12] ; Ethernet Packet Type
+//! (001) jeq #0x86dd, 2, 7 ; ETHERTYPE_IPV6
+//! (002) ldb [20] ; IPv6 Next Header
+//! (003) jeq #0x6, 10, 4 ; TCP
+//! (004) jeq #0x2c, 5, 11 ; IPv6 Fragment Header
+//! (005) ldb [54] ; TCP Source Port
+//! (006) jeq #0x6, 10, 11 ; IPPROTO_TCP
+//! (007) jeq #0x800, 8, 11 ; ETHERTYPE_IP
+//! (008) ldb [23] ; IPv4 Protocol
+//! (009) jeq #0x6, 10, 11 ; IPPROTO_TCP
+//! (010) ret #262144 ; copy 0x40000
+//! (011) ret #0 ; skip packet
+//! ```
+//!
+//! Here we can make a few observations:
+//!
+//! - The problem "filter only tcp packets" has essentially been transformed
+//! into a series of layer checks.
+//! - There are two distinct branches in the code, one for validating IPv4
+//! headers and one for IPv6 headers.
+//! - Most conditional jumps in these branches lead directly to the last two
+//! instructions, a pass or fail. Thus the goal of a program is to find the
+//! fastest route to a pass/fail comparison.
+//!
+//! [1]: S. McCanne and V. Jacobson, "The BSD Packet Filter: A New Architecture
+//! for User-level Packet Capture", Proceedings of the 1993 Winter USENIX.
+//! [2]: https://minnie.tuhs.org/cgi-bin/utree.pl?file=4.4BSD/usr/src/sys/net/bpf.h
+const std = @import("std");
+const builtin = @import("builtin");
+const native_endian = builtin.target.cpu.arch.endian();
+const mem = std.mem;
+const math = std.math;
+const random = std.crypto.random;
+const assert = std.debug.assert;
+const expectEqual = std.testing.expectEqual;
+const expectError = std.testing.expectError;
+const expect = std.testing.expect;
+
+// instruction classes
+/// ld, ldh, ldb: Load data into a.
+pub const LD = 0x00;
+/// ldx: Load data into x.
+pub const LDX = 0x01;
+/// st: Store into scratch memory the value of a.
+pub const ST = 0x02;
+/// st: Store into scratch memory the value of x.
+pub const STX = 0x03;
+/// alu: Wrapping arithmetic/bitwise operations on a using the value of k/x.
+pub const ALU = 0x04;
+/// jmp, jeq, jgt, jge, jset: Increment the program counter based on a comparison
+/// between k/x and the accumulator.
+pub const JMP = 0x05;
+/// ret: Return a verdict using the value of k/the accumulator.
+pub const RET = 0x06;
+/// tax, txa: Register value copying between X and a.
+pub const MISC = 0x07;
+
+// Size of data to be loaded from the packet.
+/// ld: 32-bit full word.
+pub const W = 0x00;
+/// ldh: 16-bit half word.
+pub const H = 0x08;
+/// ldb: Single byte.
+pub const B = 0x10;
+
+// Addressing modes used for loads to a/x.
+/// #k: The immediate value stored in k.
+pub const IMM = 0x00;
+/// [k]: The value at offset k in the packet.
+pub const ABS = 0x20;
+/// [x + k]: The value at offset x + k in the packet.
+pub const IND = 0x40;
+/// M[k]: The value of the k'th scratch memory register.
+pub const MEM = 0x60;
+/// #len: The size of the packet.
+pub const LEN = 0x80;
+/// 4 * ([k] & 0xf): Four times the low four bits of the byte at offset k in the
+/// packet. This is used for efficiently loading the header length of an IP
+/// packet.
+pub const MSH = 0xa0;
+/// arc4random: 32-bit integer generated from a CPRNG (see arc4random(3)) loaded into a.
+/// EXTENSION. Defined for:
+/// - OpenBSD.
+pub const RND = 0xc0;
+
+// Modifiers for different instruction classes.
+/// Use the value of k for alu operations (add #k).
+/// Compare against the value of k for jumps (jeq #k, Lt, Lf).
+/// Return the value of k for returns (ret #k).
+pub const K = 0x00;
+/// Use the value of x for alu operations (add x).
+/// Compare against the value of X for jumps (jeq x, Lt, Lf).
+pub const X = 0x08;
+/// Return the value of a for returns (ret a).
+pub const A = 0x10;
+
+// ALU Operations on a using the value of k/x.
+// All arithmetic operations are defined to overflow the value of a.
+/// add: a = a + k
+/// a = a + x.
+pub const ADD = 0x00;
+/// sub: a = a - k
+/// a = a - x.
+pub const SUB = 0x10;
+/// mul: a = a * k
+/// a = a * x.
+pub const MUL = 0x20;
+/// div: a = a / k
+/// a = a / x.
+/// Truncated division.
+pub const DIV = 0x30;
+/// or: a = a | k
+/// a = a | x.
+pub const OR = 0x40;
+/// and: a = a & k
+/// a = a & x.
+pub const AND = 0x50;
+/// lsh: a = a << k
+/// a = a << x.
+/// a = a << k, a = a << x.
+pub const LSH = 0x60;
+/// rsh: a = a >> k
+/// a = a >> x.
+pub const RSH = 0x70;
+/// neg: a = -a.
+/// Note that this isn't a binary negation, rather the value of `~a + 1`.
+pub const NEG = 0x80;
+/// mod: a = a % k
+/// a = a % x.
+/// EXTENSION. Defined for:
+/// - Linux.
+/// - NetBSD + Minix 3.
+/// - FreeBSD and derivatives.
+pub const MOD = 0x90;
+/// xor: a = a ^ k
+/// a = a ^ x.
+/// EXTENSION. Defined for:
+/// - Linux.
+/// - NetBSD + Minix 3.
+/// - FreeBSD and derivatives.
+pub const XOR = 0xa0;
+
+// Jump operations using a comparison between a and x/k.
+/// jmp L: pc += k.
+/// No comparison done here.
+pub const JA = 0x00;
+/// jeq #k, Lt, Lf: pc += (a == k) ? jt : jf.
+/// jeq x, Lt, Lf: pc += (a == x) ? jt : jf.
+pub const JEQ = 0x10;
+/// jgt #k, Lt, Lf: pc += (a > k) ? jt : jf.
+/// jgt x, Lt, Lf: pc += (a > x) ? jt : jf.
+pub const JGT = 0x20;
+/// jge #k, Lt, Lf: pc += (a >= k) ? jt : jf.
+/// jge x, Lt, Lf: pc += (a >= x) ? jt : jf.
+pub const JGE = 0x30;
+/// jset #k, Lt, Lf: pc += (a & k > 0) ? jt : jf.
+/// jset x, Lt, Lf: pc += (a & x > 0) ? jt : jf.
+pub const JSET = 0x40;
+
+// Miscellaneous operations/register copy.
+/// tax: x = a.
+pub const TAX = 0x00;
+/// txa: a = x.
+pub const TXA = 0x80;
+
+/// The 16 registers in the scratch memory store as named enums.
+pub const Scratch = enum(u4) { m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15 };
+pub const MEMWORDS = 16;
+pub const MAXINSNS = switch (builtin.os.tag) {
+ .linux => 4096,
+ else => 512,
+};
+pub const MINBUFSIZE = 32;
+pub const MAXBUFSIZE = 1 << 21;
+
+pub const Insn = extern struct {
+ opcode: u16,
+ jt: u8,
+ jf: u8,
+ k: u32,
+
+ /// Implements the `std.fmt.format` API.
+ /// The formatting is similar to the output of tcpdump -dd.
+ pub fn format(
+ self: Insn,
+ comptime layout: []const u8,
+ opts: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = opts;
+ if (comptime layout.len != 0 and layout[0] != 's')
+ @compileError("Unsupported format specifier for BPF Insn type '" ++ layout ++ "'.");
+
+ try std.fmt.format(
+ writer,
+ "Insn{{ 0x{X:0<2}, {d}, {d}, 0x{X:0<8} }}",
+ .{ self.opcode, self.jt, self.jf, self.k },
+ );
+ }
+
+ const Size = enum(u8) {
+ word = W,
+ half_word = H,
+ byte = B,
+ };
+
+ fn stmt(opcode: u16, k: u32) Insn {
+ return .{
+ .opcode = opcode,
+ .jt = 0,
+ .jf = 0,
+ .k = k,
+ };
+ }
+
+ pub fn ld_imm(value: u32) Insn {
+ return stmt(LD | IMM, value);
+ }
+
+ pub fn ld_abs(size: Size, offset: u32) Insn {
+ return stmt(LD | ABS | @enumToInt(size), offset);
+ }
+
+ pub fn ld_ind(size: Size, offset: u32) Insn {
+ return stmt(LD | IND | @enumToInt(size), offset);
+ }
+
+ pub fn ld_mem(reg: Scratch) Insn {
+ return stmt(LD | MEM, @enumToInt(reg));
+ }
+
+ pub fn ld_len() Insn {
+ return stmt(LD | LEN | W, 0);
+ }
+
+ pub fn ld_rnd() Insn {
+ return stmt(LD | RND | W, 0);
+ }
+
+ pub fn ldx_imm(value: u32) Insn {
+ return stmt(LDX | IMM, value);
+ }
+
+ pub fn ldx_mem(reg: Scratch) Insn {
+ return stmt(LDX | MEM, @enumToInt(reg));
+ }
+
+ pub fn ldx_len() Insn {
+ return stmt(LDX | LEN | W, 0);
+ }
+
+ pub fn ldx_msh(offset: u32) Insn {
+ return stmt(LDX | MSH | B, offset);
+ }
+
+ pub fn st(reg: Scratch) Insn {
+ return stmt(ST, @enumToInt(reg));
+ }
+ pub fn stx(reg: Scratch) Insn {
+ return stmt(STX, @enumToInt(reg));
+ }
+
+ const AluOp = enum(u16) {
+ add = ADD,
+ sub = SUB,
+ mul = MUL,
+ div = DIV,
+ @"or" = OR,
+ @"and" = AND,
+ lsh = LSH,
+ rsh = RSH,
+ mod = MOD,
+ xor = XOR,
+ };
+
+ const Source = enum(u16) {
+ k = K,
+ x = X,
+ };
+ const KOrX = union(Source) {
+ k: u32,
+ x: void,
+ };
+
+ pub fn alu_neg() Insn {
+ return stmt(ALU | NEG, 0);
+ }
+
+ pub fn alu(op: AluOp, source: KOrX) Insn {
+ return stmt(
+ ALU | @enumToInt(op) | @enumToInt(source),
+ if (source == .k) source.k else 0,
+ );
+ }
+
+ const JmpOp = enum(u16) {
+ jeq = JEQ,
+ jgt = JGT,
+ jge = JGE,
+ jset = JSET,
+ };
+
+ pub fn jmp_ja(location: u32) Insn {
+ return stmt(JMP | JA, location);
+ }
+
+ pub fn jmp(op: JmpOp, source: KOrX, jt: u8, jf: u8) Insn {
+ return Insn{
+ .opcode = JMP | @enumToInt(op) | @enumToInt(source),
+ .jt = jt,
+ .jf = jf,
+ .k = if (source == .k) source.k else 0,
+ };
+ }
+
+ const Verdict = enum(u16) {
+ k = K,
+ a = A,
+ };
+ const KOrA = union(Verdict) {
+ k: u32,
+ a: void,
+ };
+
+ pub fn ret(verdict: KOrA) Insn {
+ return stmt(
+ RET | @enumToInt(verdict),
+ if (verdict == .k) verdict.k else 0,
+ );
+ }
+
+ pub fn tax() Insn {
+ return stmt(MISC | TAX, 0);
+ }
+
+ pub fn txa() Insn {
+ return stmt(MISC | TXA, 0);
+ }
+};
+
+fn opcodeEqual(opcode: u16, insn: Insn) !void {
+ try expectEqual(opcode, insn.opcode);
+}
+
+test "opcodes" {
+ try opcodeEqual(0x00, Insn.ld_imm(0));
+ try opcodeEqual(0x20, Insn.ld_abs(.word, 0));
+ try opcodeEqual(0x28, Insn.ld_abs(.half_word, 0));
+ try opcodeEqual(0x30, Insn.ld_abs(.byte, 0));
+ try opcodeEqual(0x40, Insn.ld_ind(.word, 0));
+ try opcodeEqual(0x48, Insn.ld_ind(.half_word, 0));
+ try opcodeEqual(0x50, Insn.ld_ind(.byte, 0));
+ try opcodeEqual(0x60, Insn.ld_mem(.m0));
+ try opcodeEqual(0x80, Insn.ld_len());
+ try opcodeEqual(0xc0, Insn.ld_rnd());
+
+ try opcodeEqual(0x01, Insn.ldx_imm(0));
+ try opcodeEqual(0x61, Insn.ldx_mem(.m0));
+ try opcodeEqual(0x81, Insn.ldx_len());
+ try opcodeEqual(0xb1, Insn.ldx_msh(0));
+
+ try opcodeEqual(0x02, Insn.st(.m0));
+ try opcodeEqual(0x03, Insn.stx(.m0));
+
+ try opcodeEqual(0x04, Insn.alu(.add, .{ .k = 0 }));
+ try opcodeEqual(0x14, Insn.alu(.sub, .{ .k = 0 }));
+ try opcodeEqual(0x24, Insn.alu(.mul, .{ .k = 0 }));
+ try opcodeEqual(0x34, Insn.alu(.div, .{ .k = 0 }));
+ try opcodeEqual(0x44, Insn.alu(.@"or", .{ .k = 0 }));
+ try opcodeEqual(0x54, Insn.alu(.@"and", .{ .k = 0 }));
+ try opcodeEqual(0x64, Insn.alu(.lsh, .{ .k = 0 }));
+ try opcodeEqual(0x74, Insn.alu(.rsh, .{ .k = 0 }));
+ try opcodeEqual(0x94, Insn.alu(.mod, .{ .k = 0 }));
+ try opcodeEqual(0xa4, Insn.alu(.xor, .{ .k = 0 }));
+ try opcodeEqual(0x84, Insn.alu_neg());
+ try opcodeEqual(0x0c, Insn.alu(.add, .x));
+ try opcodeEqual(0x1c, Insn.alu(.sub, .x));
+ try opcodeEqual(0x2c, Insn.alu(.mul, .x));
+ try opcodeEqual(0x3c, Insn.alu(.div, .x));
+ try opcodeEqual(0x4c, Insn.alu(.@"or", .x));
+ try opcodeEqual(0x5c, Insn.alu(.@"and", .x));
+ try opcodeEqual(0x6c, Insn.alu(.lsh, .x));
+ try opcodeEqual(0x7c, Insn.alu(.rsh, .x));
+ try opcodeEqual(0x9c, Insn.alu(.mod, .x));
+ try opcodeEqual(0xac, Insn.alu(.xor, .x));
+
+ try opcodeEqual(0x05, Insn.jmp_ja(0));
+ try opcodeEqual(0x15, Insn.jmp(.jeq, .{ .k = 0 }, 0, 0));
+ try opcodeEqual(0x25, Insn.jmp(.jgt, .{ .k = 0 }, 0, 0));
+ try opcodeEqual(0x35, Insn.jmp(.jge, .{ .k = 0 }, 0, 0));
+ try opcodeEqual(0x45, Insn.jmp(.jset, .{ .k = 0 }, 0, 0));
+ try opcodeEqual(0x1d, Insn.jmp(.jeq, .x, 0, 0));
+ try opcodeEqual(0x2d, Insn.jmp(.jgt, .x, 0, 0));
+ try opcodeEqual(0x3d, Insn.jmp(.jge, .x, 0, 0));
+ try opcodeEqual(0x4d, Insn.jmp(.jset, .x, 0, 0));
+
+ try opcodeEqual(0x06, Insn.ret(.{ .k = 0 }));
+ try opcodeEqual(0x16, Insn.ret(.a));
+
+ try opcodeEqual(0x07, Insn.tax());
+ try opcodeEqual(0x87, Insn.txa());
+}
+
+pub const Error = error{
+ InvalidOpcode,
+ InvalidOffset,
+ InvalidLocation,
+ DivisionByZero,
+ NoReturn,
+};
+
+/// A simple implementation of the BPF virtual-machine.
+/// Use this to run/debug programs.
+pub fn simulate(
+ packet: []const u8,
+ filter: []const Insn,
+ byte_order: std.builtin.Endian,
+) Error!u32 {
+ assert(filter.len > 0 and filter.len < MAXINSNS);
+ assert(packet.len < MAXBUFSIZE);
+ const len = @intCast(u32, packet.len);
+
+ var a: u32 = 0;
+ var x: u32 = 0;
+ var m = mem.zeroes([MEMWORDS]u32);
+ var pc: usize = 0;
+
+ while (pc < filter.len) : (pc += 1) {
+ const i = filter[pc];
+ // Cast to a wider type to protect against overflow.
+ const k = @as(u64, i.k);
+ const remaining = filter.len - (pc + 1);
+
+ // Do validation/error checking here to compress the second switch.
+ switch (i.opcode) {
+ LD | ABS | W => if (k + @sizeOf(u32) - 1 >= packet.len) return error.InvalidOffset,
+ LD | ABS | H => if (k + @sizeOf(u16) - 1 >= packet.len) return error.InvalidOffset,
+ LD | ABS | B => if (k >= packet.len) return error.InvalidOffset,
+ LD | IND | W => if (k + x + @sizeOf(u32) - 1 >= packet.len) return error.InvalidOffset,
+ LD | IND | H => if (k + x + @sizeOf(u16) - 1 >= packet.len) return error.InvalidOffset,
+ LD | IND | B => if (k + x >= packet.len) return error.InvalidOffset,
+
+ LDX | MSH | B => if (k >= packet.len) return error.InvalidOffset,
+ ST, STX, LD | MEM, LDX | MEM => if (i.k >= MEMWORDS) return error.InvalidOffset,
+
+ JMP | JA => if (remaining <= i.k) return error.InvalidOffset,
+ JMP | JEQ | K,
+ JMP | JGT | K,
+ JMP | JGE | K,
+ JMP | JSET | K,
+ JMP | JEQ | X,
+ JMP | JGT | X,
+ JMP | JGE | X,
+ JMP | JSET | X,
+ => if (remaining <= i.jt or remaining <= i.jf) return error.InvalidLocation,
+ else => {},
+ }
+ switch (i.opcode) {
+ LD | IMM => a = i.k,
+ LD | MEM => a = m[i.k],
+ LD | LEN | W => a = len,
+ LD | RND | W => a = random.int(u32),
+ LD | ABS | W => a = mem.readInt(u32, packet[i.k..][0..@sizeOf(u32)], byte_order),
+ LD | ABS | H => a = mem.readInt(u16, packet[i.k..][0..@sizeOf(u16)], byte_order),
+ LD | ABS | B => a = packet[i.k],
+ LD | IND | W => a = mem.readInt(u32, packet[i.k + x ..][0..@sizeOf(u32)], byte_order),
+ LD | IND | H => a = mem.readInt(u16, packet[i.k + x ..][0..@sizeOf(u16)], byte_order),
+ LD | IND | B => a = packet[i.k + x],
+
+ LDX | IMM => x = i.k,
+ LDX | MEM => x = m[i.k],
+ LDX | LEN | W => x = len,
+ LDX | MSH | B => x = @as(u32, @truncate(u4, packet[i.k])) << 2,
+
+ ST => m[i.k] = a,
+ STX => m[i.k] = x,
+
+ ALU | ADD | K => a +%= i.k,
+ ALU | SUB | K => a -%= i.k,
+ ALU | MUL | K => a *%= i.k,
+ ALU | DIV | K => a = try math.divTrunc(u32, a, i.k),
+ ALU | OR | K => a |= i.k,
+ ALU | AND | K => a &= i.k,
+ ALU | LSH | K => a = math.shl(u32, a, i.k),
+ ALU | RSH | K => a = math.shr(u32, a, i.k),
+ ALU | MOD | K => a = try math.mod(u32, a, i.k),
+ ALU | XOR | K => a ^= i.k,
+ ALU | ADD | X => a +%= x,
+ ALU | SUB | X => a -%= x,
+ ALU | MUL | X => a *%= x,
+ ALU | DIV | X => a = try math.divTrunc(u32, a, x),
+ ALU | OR | X => a |= x,
+ ALU | AND | X => a &= x,
+ ALU | LSH | X => a = math.shl(u32, a, x),
+ ALU | RSH | X => a = math.shr(u32, a, x),
+ ALU | MOD | X => a = try math.mod(u32, a, x),
+ ALU | XOR | X => a ^= x,
+ ALU | NEG => a = @bitCast(u32, -%@bitCast(i32, a)),
+
+ JMP | JA => pc += i.k,
+ JMP | JEQ | K => pc += if (a == i.k) i.jt else i.jf,
+ JMP | JGT | K => pc += if (a > i.k) i.jt else i.jf,
+ JMP | JGE | K => pc += if (a >= i.k) i.jt else i.jf,
+ JMP | JSET | K => pc += if (a & i.k > 0) i.jt else i.jf,
+ JMP | JEQ | X => pc += if (a == x) i.jt else i.jf,
+ JMP | JGT | X => pc += if (a > x) i.jt else i.jf,
+ JMP | JGE | X => pc += if (a >= x) i.jt else i.jf,
+ JMP | JSET | X => pc += if (a & x > 0) i.jt else i.jf,
+
+ RET | K => return i.k,
+ RET | A => return a,
+
+ MISC | TAX => x = a,
+ MISC | TXA => a = x,
+ else => return error.InvalidOpcode,
+ }
+ }
+
+ return error.NoReturn;
+}
+
+// This program is the BPF form of the tcpdump filter:
+//
+// tcpdump -dd 'ip host mirror.internode.on.net and tcp port ftp-data'
+//
+// As of January 2022, mirror.internode.on.net resolves to 150.101.135.3
+//
+// For reference, here's what it looks like in BPF assembler.
+// Note that the jumps are used for TCP/IP layer checks.
+//
+// ```
+// ldh [12] (#proto)
+// jeq #0x0800 (ETHERTYPE_IP), L1, fail
+// L1: ld [26]
+// jeq #150.101.135.3, L2, dest
+// dest: ld [30]
+// jeq #150.101.135.3, L2, fail
+// L2: ldb [23]
+// jeq #0x6 (IPPROTO_TCP), L3, fail
+// L3: ldh [20]
+// jset #0x1fff, fail, plen
+// plen: ldx 4 * ([14] & 0xf)
+// ldh [x + 14]
+// jeq #0x14 (FTP), pass, dstp
+// dstp: ldh [x + 16]
+// jeq #0x14 (FTP), pass, fail
+// pass: ret #0x40000
+// fail: ret #0
+// ```
+const tcpdump_filter = [_]Insn{
+ Insn.ld_abs(.half_word, 12),
+ Insn.jmp(.jeq, .{ .k = 0x800 }, 0, 14),
+ Insn.ld_abs(.word, 26),
+ Insn.jmp(.jeq, .{ .k = 0x96658703 }, 2, 0),
+ Insn.ld_abs(.word, 30),
+ Insn.jmp(.jeq, .{ .k = 0x96658703 }, 0, 10),
+ Insn.ld_abs(.byte, 23),
+ Insn.jmp(.jeq, .{ .k = 0x6 }, 0, 8),
+ Insn.ld_abs(.half_word, 20),
+ Insn.jmp(.jset, .{ .k = 0x1fff }, 6, 0),
+ Insn.ldx_msh(14),
+ Insn.ld_ind(.half_word, 14),
+ Insn.jmp(.jeq, .{ .k = 0x14 }, 2, 0),
+ Insn.ld_ind(.half_word, 16),
+ Insn.jmp(.jeq, .{ .k = 0x14 }, 0, 1),
+ Insn.ret(.{ .k = 0x40000 }),
+ Insn.ret(.{ .k = 0 }),
+};
+
+// This packet is the output of `ls` on mirror.internode.on.net:/, captured
+// using the filter above.
+//
+// zig fmt: off
+const ftp_data = [_]u8{
+ // ethernet - 14 bytes: IPv4(0x0800) from a4:71:74:ad:4b:f0 -> de:ad:be:ef:f0:0f
+ 0xde, 0xad, 0xbe, 0xef, 0xf0, 0x0f, 0xa4, 0x71, 0x74, 0xad, 0x4b, 0xf0, 0x08, 0x00,
+ // IPv4 - 20 bytes: TCP data from 150.101.135.3 -> 192.168.1.3
+ 0x45, 0x00, 0x01, 0xf2, 0x70, 0x3b, 0x40, 0x00, 0x37, 0x06, 0xf2, 0xb6,
+ 0x96, 0x65, 0x87, 0x03, 0xc0, 0xa8, 0x01, 0x03,
+ // TCP - 32 bytes: Source port: 20 (FTP). Payload = 446 bytes
+ 0x00, 0x14, 0x80, 0x6d, 0x35, 0x81, 0x2d, 0x40, 0x4f, 0x8a, 0x29, 0x9e, 0x80, 0x18, 0x00, 0x2e,
+ 0x88, 0x8d, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x0b, 0x59, 0x5d, 0x09, 0x32, 0x8b, 0x51, 0xa0
+} ++
+ // Raw line-based FTP data - 446 bytes
+ "lrwxrwxrwx 1 root root 12 Feb 14 2012 debian -> .pub2/debian\r\n" ++
+ "lrwxrwxrwx 1 root root 15 Feb 14 2012 debian-cd -> .pub2/debian-cd\r\n" ++
+ "lrwxrwxrwx 1 root root 9 Mar 9 2018 linux -> pub/linux\r\n" ++
+ "drwxr-xr-X 3 mirror mirror 4096 Sep 20 08:10 pub\r\n" ++
+ "lrwxrwxrwx 1 root root 12 Feb 14 2012 ubuntu -> .pub2/ubuntu\r\n" ++
+ "-rw-r--r-- 1 root root 1044 Jan 20 2015 welcome.msg\r\n";
+// zig fmt: on
+
+test "tcpdump filter" {
+ try expectEqual(
+ @as(u32, 0x40000),
+ try simulate(ftp_data, &tcpdump_filter, .Big),
+ );
+}
+
+fn expectPass(data: anytype, filter: []Insn) !void {
+ try expectEqual(
+ @as(u32, 0),
+ try simulate(mem.asBytes(data), filter, .Big),
+ );
+}
+
+fn expectFail(expected_error: anyerror, data: anytype, filter: []Insn) !void {
+ try expectError(
+ expected_error,
+ simulate(mem.asBytes(data), filter, native_endian),
+ );
+}
+
+test "simulator coverage" {
+ const some_data: packed struct {
+ foo: u32,
+ bar: u8,
+ } = .{
+ .foo = mem.nativeToBig(u32, 0xaabbccdd),
+ .bar = 0x7f,
+ };
+
+ try expectPass(&some_data, &.{
+ // ld #10
+ // ldx #1
+ // st M[0]
+ // stx M[1]
+ // fail if A != 10
+ Insn.ld_imm(10),
+ Insn.ldx_imm(1),
+ Insn.st(.m0),
+ Insn.stx(.m1),
+ Insn.jmp(.jeq, .{ .k = 10 }, 1, 0),
+ Insn.ret(.{ .k = 1 }),
+ // ld [0]
+ // fail if A != 0xaabbccdd
+ Insn.ld_abs(.word, 0),
+ Insn.jmp(.jeq, .{ .k = 0xaabbccdd }, 1, 0),
+ Insn.ret(.{ .k = 2 }),
+ // ldh [0]
+ // fail if A != 0xaabb
+ Insn.ld_abs(.half_word, 0),
+ Insn.jmp(.jeq, .{ .k = 0xaabb }, 1, 0),
+ Insn.ret(.{ .k = 3 }),
+ // ldb [0]
+ // fail if A != 0xaa
+ Insn.ld_abs(.byte, 0),
+ Insn.jmp(.jeq, .{ .k = 0xaa }, 1, 0),
+ Insn.ret(.{ .k = 4 }),
+ // ld [x + 0]
+ // fail if A != 0xbbccdd7f
+ Insn.ld_ind(.word, 0),
+ Insn.jmp(.jeq, .{ .k = 0xbbccdd7f }, 1, 0),
+ Insn.ret(.{ .k = 5 }),
+ // ldh [x + 0]
+ // fail if A != 0xbbcc
+ Insn.ld_ind(.half_word, 0),
+ Insn.jmp(.jeq, .{ .k = 0xbbcc }, 1, 0),
+ Insn.ret(.{ .k = 6 }),
+ // ldb [x + 0]
+ // fail if A != 0xbb
+ Insn.ld_ind(.byte, 0),
+ Insn.jmp(.jeq, .{ .k = 0xbb }, 1, 0),
+ Insn.ret(.{ .k = 7 }),
+ // ld M[0]
+ // fail if A != 10
+ Insn.ld_mem(.m0),
+ Insn.jmp(.jeq, .{ .k = 10 }, 1, 0),
+ Insn.ret(.{ .k = 8 }),
+ // ld #len
+ // fail if A != 5
+ Insn.ld_len(),
+ Insn.jmp(.jeq, .{ .k = @sizeOf(@TypeOf(some_data)) }, 1, 0),
+ Insn.ret(.{ .k = 9 }),
+ // ld #0
+ // ld arc4random()
+ // fail if A == 0
+ Insn.ld_imm(0),
+ Insn.ld_rnd(),
+ Insn.jmp(.jgt, .{ .k = 0 }, 1, 0),
+ Insn.ret(.{ .k = 10 }),
+ // ld #3
+ // ldx #10
+ // st M[2]
+ // txa
+ // fail if a != x
+ Insn.ld_imm(3),
+ Insn.ldx_imm(10),
+ Insn.st(.m2),
+ Insn.txa(),
+ Insn.jmp(.jeq, .x, 1, 0),
+ Insn.ret(.{ .k = 11 }),
+ // ldx M[2]
+ // fail if A <= X
+ Insn.ldx_mem(.m2),
+ Insn.jmp(.jgt, .x, 1, 0),
+ Insn.ret(.{ .k = 12 }),
+ // ldx #len
+ // fail if a <= x
+ Insn.ldx_len(),
+ Insn.jmp(.jgt, .x, 1, 0),
+ Insn.ret(.{ .k = 13 }),
+ // a = 4 * (0x7f & 0xf)
+ // x = 4 * ([4] & 0xf)
+ // fail if a != x
+ Insn.ld_imm(4 * (0x7f & 0xf)),
+ Insn.ldx_msh(4),
+ Insn.jmp(.jeq, .x, 1, 0),
+ Insn.ret(.{ .k = 14 }),
+ // ld #(u32)-1
+ // ldx #2
+ // add #1
+ // fail if a != 0
+ Insn.ld_imm(0xffffffff),
+ Insn.ldx_imm(2),
+ Insn.alu(.add, .{ .k = 1 }),
+ Insn.jmp(.jeq, .{ .k = 0 }, 1, 0),
+ Insn.ret(.{ .k = 15 }),
+ // sub #1
+ // fail if a != (u32)-1
+ Insn.alu(.sub, .{ .k = 1 }),
+ Insn.jmp(.jeq, .{ .k = 0xffffffff }, 1, 0),
+ Insn.ret(.{ .k = 16 }),
+ // add x
+ // fail if a != 1
+ Insn.alu(.add, .x),
+ Insn.jmp(.jeq, .{ .k = 1 }, 1, 0),
+ Insn.ret(.{ .k = 17 }),
+ // sub x
+ // fail if a != (u32)-1
+ Insn.alu(.sub, .x),
+ Insn.jmp(.jeq, .{ .k = 0xffffffff }, 1, 0),
+ Insn.ret(.{ .k = 18 }),
+ // ld #16
+ // mul #2
+ // fail if a != 32
+ Insn.ld_imm(16),
+ Insn.alu(.mul, .{ .k = 2 }),
+ Insn.jmp(.jeq, .{ .k = 32 }, 1, 0),
+ Insn.ret(.{ .k = 19 }),
+ // mul x
+ // fail if a != 64
+ Insn.alu(.mul, .x),
+ Insn.jmp(.jeq, .{ .k = 64 }, 1, 0),
+ Insn.ret(.{ .k = 20 }),
+ // div #2
+ // fail if a != 32
+ Insn.alu(.div, .{ .k = 2 }),
+ Insn.jmp(.jeq, .{ .k = 32 }, 1, 0),
+ Insn.ret(.{ .k = 21 }),
+ // div x
+ // fail if a != 16
+ Insn.alu(.div, .x),
+ Insn.jmp(.jeq, .{ .k = 16 }, 1, 0),
+ Insn.ret(.{ .k = 22 }),
+ // or #4
+ // fail if a != 20
+ Insn.alu(.@"or", .{ .k = 4 }),
+ Insn.jmp(.jeq, .{ .k = 20 }, 1, 0),
+ Insn.ret(.{ .k = 23 }),
+ // or x
+ // fail if a != 22
+ Insn.alu(.@"or", .x),
+ Insn.jmp(.jeq, .{ .k = 22 }, 1, 0),
+ Insn.ret(.{ .k = 24 }),
+ // and #6
+ // fail if a != 6
+ Insn.alu(.@"and", .{ .k = 0b110 }),
+ Insn.jmp(.jeq, .{ .k = 6 }, 1, 0),
+ Insn.ret(.{ .k = 25 }),
+ // and x
+ // fail if a != 2
+ Insn.alu(.@"and", .x),
+ Insn.jmp(.jeq, .x, 1, 0),
+ Insn.ret(.{ .k = 26 }),
+ // xor #15
+ // fail if a != 13
+ Insn.alu(.xor, .{ .k = 0b1111 }),
+ Insn.jmp(.jeq, .{ .k = 0b1101 }, 1, 0),
+ Insn.ret(.{ .k = 27 }),
+ // xor x
+ // fail if a != 15
+ Insn.alu(.xor, .x),
+ Insn.jmp(.jeq, .{ .k = 0b1111 }, 1, 0),
+ Insn.ret(.{ .k = 28 }),
+ // rsh #1
+ // fail if a != 7
+ Insn.alu(.rsh, .{ .k = 1 }),
+ Insn.jmp(.jeq, .{ .k = 0b0111 }, 1, 0),
+ Insn.ret(.{ .k = 29 }),
+ // rsh x
+ // fail if a != 1
+ Insn.alu(.rsh, .x),
+ Insn.jmp(.jeq, .{ .k = 0b0001 }, 1, 0),
+ Insn.ret(.{ .k = 30 }),
+ // lsh #1
+ // fail if a != 2
+ Insn.alu(.lsh, .{ .k = 1 }),
+ Insn.jmp(.jeq, .{ .k = 0b0010 }, 1, 0),
+ Insn.ret(.{ .k = 31 }),
+ // lsh x
+ // fail if a != 8
+ Insn.alu(.lsh, .x),
+ Insn.jmp(.jeq, .{ .k = 0b1000 }, 1, 0),
+ Insn.ret(.{ .k = 32 }),
+ // mod 6
+ // fail if a != 2
+ Insn.alu(.mod, .{ .k = 6 }),
+ Insn.jmp(.jeq, .{ .k = 2 }, 1, 0),
+ Insn.ret(.{ .k = 33 }),
+ // mod x
+ // fail if a != 0
+ Insn.alu(.mod, .x),
+ Insn.jmp(.jeq, .{ .k = 0 }, 1, 0),
+ Insn.ret(.{ .k = 34 }),
+ // tax
+ // neg
+ // fail if a != (u32)-2
+ Insn.txa(),
+ Insn.alu_neg(),
+ Insn.jmp(.jeq, .{ .k = ~@as(u32, 2) + 1 }, 1, 0),
+ Insn.ret(.{ .k = 35 }),
+ // ja #1 (skip the next instruction)
+ Insn.jmp_ja(1),
+ Insn.ret(.{ .k = 36 }),
+ // ld #20
+ // tax
+ // fail if a != 20
+ // fail if a != x
+ Insn.ld_imm(20),
+ Insn.tax(),
+ Insn.jmp(.jeq, .{ .k = 20 }, 1, 0),
+ Insn.ret(.{ .k = 37 }),
+ Insn.jmp(.jeq, .x, 1, 0),
+ Insn.ret(.{ .k = 38 }),
+ // ld #19
+ // fail if a == 20
+ // fail if a == x
+ // fail if a >= 20
+ // fail if a >= X
+ Insn.ld_imm(19),
+ Insn.jmp(.jeq, .{ .k = 20 }, 0, 1),
+ Insn.ret(.{ .k = 39 }),
+ Insn.jmp(.jeq, .x, 0, 1),
+ Insn.ret(.{ .k = 40 }),
+ Insn.jmp(.jgt, .{ .k = 20 }, 0, 1),
+ Insn.ret(.{ .k = 41 }),
+ Insn.jmp(.jgt, .x, 0, 1),
+ Insn.ret(.{ .k = 42 }),
+ // ld #21
+ // fail if a < 20
+ // fail if a < x
+ Insn.ld_imm(21),
+ Insn.jmp(.jgt, .{ .k = 20 }, 1, 0),
+ Insn.ret(.{ .k = 43 }),
+ Insn.jmp(.jgt, .x, 1, 0),
+ Insn.ret(.{ .k = 44 }),
+ // ldx #22
+ // fail if a < 22
+ // fail if a < x
+ Insn.ldx_imm(22),
+ Insn.jmp(.jge, .{ .k = 22 }, 0, 1),
+ Insn.ret(.{ .k = 45 }),
+ Insn.jmp(.jge, .x, 0, 1),
+ Insn.ret(.{ .k = 46 }),
+ // ld #23
+ // fail if a >= 22
+ // fail if a >= x
+ Insn.ld_imm(23),
+ Insn.jmp(.jge, .{ .k = 22 }, 1, 0),
+ Insn.ret(.{ .k = 47 }),
+ Insn.jmp(.jge, .x, 1, 0),
+ Insn.ret(.{ .k = 48 }),
+ // ldx #0b10100
+ // fail if a & 0b10100 == 0
+ // fail if a & x == 0
+ Insn.ldx_imm(0b10100),
+ Insn.jmp(.jset, .{ .k = 0b10100 }, 1, 0),
+ Insn.ret(.{ .k = 47 }),
+ Insn.jmp(.jset, .x, 1, 0),
+ Insn.ret(.{ .k = 48 }),
+ // ldx #0
+ // fail if a & 0 > 0
+ // fail if a & x > 0
+ Insn.ldx_imm(0),
+ Insn.jmp(.jset, .{ .k = 0 }, 0, 1),
+ Insn.ret(.{ .k = 49 }),
+ Insn.jmp(.jset, .x, 0, 1),
+ Insn.ret(.{ .k = 50 }),
+ Insn.ret(.{ .k = 0 }),
+ });
+ try expectPass(&some_data, &.{
+ Insn.ld_imm(35),
+ Insn.ld_imm(0),
+ Insn.ret(.a),
+ });
+
+ // Errors
+ try expectFail(error.NoReturn, &some_data, &.{
+ Insn.ld_imm(10),
+ });
+ try expectFail(error.InvalidOpcode, &some_data, &.{
+ Insn.stmt(0x7f, 0xdeadbeef),
+ });
+ try expectFail(error.InvalidOffset, &some_data, &.{
+ Insn.stmt(LD | ABS | W, 10),
+ });
+ try expectFail(error.InvalidLocation, &some_data, &.{
+ Insn.jmp(.jeq, .{ .k = 0 }, 10, 0),
+ });
+ try expectFail(error.InvalidLocation, &some_data, &.{
+ Insn.jmp(.jeq, .{ .k = 0 }, 0, 10),
+ });
+}
From a49f2d9f8d45ea5d556ebcb24c6f38a13ec3e94b Mon Sep 17 00:00:00 2001
From: Stephen Gregoratto
Date: Sat, 29 Jan 2022 13:27:18 +1100
Subject: [PATCH 0018/2031] Add bits for the Linux Auditing System
Also adds the _CSKY and _FRV ELF machines that are defined in
`<linux/audit.h>`
---
lib/std/elf.zig | 6 +++++
lib/std/os/linux.zig | 52 ++++++++++++++++++++++++++++++++++++++++++++
lib/std/target.zig | 2 +-
3 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index 6c3b748c58..1fbad2837b 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -1480,6 +1480,12 @@ pub const EM = enum(u16) {
/// Linux kernel bpf virtual machine
_BPF = 247,
+ /// C-SKY
+ _CSKY = 252,
+
+ /// Fujitsu FR-V
+ _FRV = 0x5441,
+
_,
};
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index c1591f7ea1..fe0b71b570 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -5369,3 +5369,55 @@ pub const PERF = struct {
pub const IOC_FLAG_GROUP = 1;
};
+
+// TODO: Add the rest of the AUDIT defines?
+pub const AUDIT = struct {
+ pub const ARCH = enum(u32) {
+ const _64BIT = 0x80000000;
+ const _LE = 0x40000000;
+
+ pub const current = switch (native_arch) {
+ .i386 => .I386,
+ .x86_64 => .X86_64,
+ .aarch64 => .AARCH64,
+ .arm, .thumb => .ARM,
+ .riscv64 => .RISCV64,
+ .sparcv9 => .SPARC64,
+ .mips => .MIPS,
+ .mipsel => .MIPSEL,
+ .powerpc => .PPC,
+ .powerpc64 => .PPC64,
+ .powerpc64le => .PPC64LE,
+ else => undefined,
+ };
+
+ AARCH64 = toAudit(.aarch64),
+ ARM = toAudit(.arm),
+ ARMEB = toAudit(.armeb),
+ CSKY = toAudit(.csky),
+ HEXAGON = @enumToInt(std.elf.EM._HEXAGON),
+ I386 = toAudit(.i386),
+ M68K = toAudit(.m68k),
+ MIPS = toAudit(.mips),
+ MIPSEL = toAudit(.mips) | _LE,
+ MIPS64 = toAudit(.mips64),
+ MIPSEL64 = toAudit(.mips64) | _LE,
+ PPC = toAudit(.powerpc),
+ PPC64 = toAudit(.powerpc64),
+ PPC64LE = toAudit(.powerpc64le),
+ RISCV32 = toAudit(.riscv32),
+ RISCV64 = toAudit(.riscv64),
+ S390X = toAudit(.s390x),
+ SPARC = toAudit(.sparc),
+ SPARC64 = toAudit(.sparcv9),
+ X86_64 = toAudit(.x86_64),
+
+ fn toAudit(arch: std.Target.Cpu.Arch) u32 {
+ var res: u32 = @enumToInt(arch.toElfMachine());
+ if (arch.endian() == .Little) res |= _LE;
+ if (arch.ptrBitWidth() == 64) res |= _64BIT;
+
+ return res;
+ }
+ };
+};
diff --git a/lib/std/target.zig b/lib/std/target.zig
index ca1f668ca4..5f22923108 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -962,7 +962,7 @@ pub const Target = struct {
.amdgcn => ._NONE,
.bpfel => ._BPF,
.bpfeb => ._BPF,
- .csky => ._NONE,
+ .csky => ._CSKY,
.sparcv9 => ._SPARCV9,
.s390x => ._S390,
.ve => ._NONE,
From 29013220d95f60669c4a181d157157aea9f137b5 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Sun, 30 Jan 2022 15:24:03 +0100
Subject: [PATCH 0019/2031] wasm: Implement elem_ptr
This implements lowering elem_ptr for decls and constants.
To generate the correct pointer, we perform a relocation by using the addend
that represents the offset. The offset is calculated by taking the element's size
and multiplying that by the index.
For constants this generates a single immediate instruction, and for decls
this generates a single pointer address.
---
src/arch/wasm/CodeGen.zig | 73 ++++++++++++++++++++++++++++++++++++---
src/arch/wasm/Emit.zig | 10 +++---
src/arch/wasm/Mir.zig | 7 ++++
src/link/Wasm.zig | 12 ++++++-
4 files changed, 92 insertions(+), 10 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 7d2046b90b..2d0cf57fd4 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -39,6 +39,14 @@ const WValue = union(enum) {
/// Note: The value contains the symbol index, rather than the actual address
/// as we use this to perform the relocation.
memory: u32,
+ /// A value that represents a parent pointer and an offset
+ /// from that pointer. i.e. when slicing with constant values.
+ memory_offset: struct {
+ /// The symbol of the parent pointer
+ pointer: u32,
+ /// Offset will be set as 'addend' when relocating
+ offset: u32,
+ },
/// Represents a function pointer
/// In wasm function pointers are indexes into a function table,
/// rather than an address in the data section.
@@ -754,7 +762,14 @@ fn emitWValue(self: *Self, value: WValue) InnerError!void {
.imm64 => |val| try self.addImm64(val),
.float32 => |val| try self.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
.float64 => |val| try self.addFloat64(val),
- .memory => |ptr| try self.addLabel(.memory_address, ptr), // write sybol address and generate relocation
+ .memory => |ptr| {
+ const extra_index = try self.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 });
+ try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+ },
+ .memory_offset => |mem_off| {
+ const extra_index = try self.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset });
+ try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+ },
.function_index => |index| try self.addLabel(.function_index, index), // write function index and generate relocation
}
}
@@ -927,7 +942,7 @@ pub const DeclGen = struct {
.function => val.castTag(.function).?.data.owner_decl,
else => unreachable,
};
- return try self.lowerDeclRef(ty, val, fn_decl);
+ return try self.lowerDeclRefValue(ty, val, fn_decl, 0);
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -1115,11 +1130,11 @@ pub const DeclGen = struct {
.Pointer => switch (val.tag()) {
.variable => {
const decl = val.castTag(.variable).?.data.owner_decl;
- return self.lowerDeclRef(ty, val, decl);
+ return self.lowerDeclRefValue(ty, val, decl, 0);
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
- return self.lowerDeclRef(ty, val, decl);
+ return self.lowerDeclRefValue(ty, val, decl, writer, 0);
},
.slice => {
const slice = val.castTag(.slice).?.data;
@@ -1139,6 +1154,13 @@ pub const DeclGen = struct {
try writer.writeByteNTimes(0, @divExact(self.target().cpu.arch.ptrBitWidth(), 8));
return Result{ .appended = {} };
},
+ .elem_ptr => {
+ const elem_ptr = val.castTag(.elem_ptr).?.data;
+ const elem_size = ty.childType().abiSize(self.target());
+ const offset = elem_ptr.index * elem_size;
+ return self.lowerParentPtr(elem_ptr.array_ptr, writer, offset);
+ },
+ .int_u64 => return self.genTypedValue(Type.usize, val, writer),
else => return self.fail("TODO: Implement zig decl gen for pointer type value: '{s}'", .{@tagName(val.tag())}),
},
.ErrorUnion => {
@@ -1179,7 +1201,36 @@ pub const DeclGen = struct {
}
}
- fn lowerDeclRef(self: *DeclGen, ty: Type, val: Value, decl: *Module.Decl) InnerError!Result {
+ fn lowerParentPtr(self: *DeclGen, ptr_value: Value, offset: usize) InnerError!Result {
+ switch (ptr_value.tag()) {
+ .decl_ref => {
+ const decl = ptr_value.castTag(.decl_ref).?.data;
+ return self.lowerParentPtrDecl(ptr_value, decl, offset);
+ },
+ else => |tag| return self.fail("TODO: Implement lowerParentPtr for pointer value tag: {s}", .{tag}),
+ }
+ }
+
+ fn lowerParentPtrDecl(self: *DeclGen, ptr_val: Value, decl: *Module.Decl, offset: usize) InnerError!Result {
+ decl.markAlive();
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = decl.ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ return self.lowerDeclRefValue(ptr_ty, ptr_val, decl, offset);
+ }
+
+ fn lowerDeclRefValue(
+ self: *DeclGen,
+ ty: Type,
+ val: Value,
+ /// The target decl that is being pointed to
+ decl: *Module.Decl,
+ /// When lowering to an indexed pointer, we can specify the offset
+ /// which will then be used as 'addend' to the relocation.
+ offset: usize,
+ ) InnerError!Result {
const writer = self.code.writer();
if (ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -1202,6 +1253,7 @@ pub const DeclGen = struct {
self.symbol_index, // source symbol index
decl.link.wasm.sym_index, // target symbol index
@intCast(u32, self.code.items.len), // offset
+ @intCast(u32, offset), // addend
));
return Result{ .appended = {} };
}
@@ -1974,6 +2026,17 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
return WValue{ .function_index = target_sym_index };
} else return WValue{ .memory = target_sym_index };
},
+ .elem_ptr => {
+ const elem_ptr = val.castTag(.elem_ptr).?.data;
+ const index = elem_ptr.index;
+ const offset = index * ty.childType().abiSize(self.target);
+ const array_ptr = try self.lowerConstant(elem_ptr.array_ptr, ty);
+
+ return WValue{ .memory_offset = .{
+ .pointer = array_ptr.memory,
+ .offset = @intCast(u32, offset),
+ } };
+ },
.int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt()) },
.zero, .null_value => return WValue{ .imm32 = 0 },
else => return self.fail("Wasm TODO: lowerConstant for other const pointer tag {s}", .{val.tag()}),
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 9283a0e0b7..8cae78caf1 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -326,25 +326,27 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
}
fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
- const symbol_index = emit.mir.instructions.items(.data)[inst].label;
+ const extra_index = emit.mir.instructions.items(.data)[inst].payload;
+ const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined;
- leb128.writeUnsignedFixed(5, &buf, symbol_index);
+ leb128.writeUnsignedFixed(5, &buf, mem.pointer);
try emit.code.appendSlice(&buf);
} else {
try emit.code.append(std.wasm.opcode(.i64_const));
var buf: [10]u8 = undefined;
- leb128.writeUnsignedFixed(10, &buf, symbol_index);
+ leb128.writeUnsignedFixed(10, &buf, mem.pointer);
try emit.code.appendSlice(&buf);
}
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
.offset = mem_offset,
- .index = symbol_index,
+ .index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
+ .addend = mem.offset,
});
}
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 07696f0dd3..ed0867e583 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -546,3 +546,10 @@ pub const MemArg = struct {
offset: u32,
alignment: u32,
};
+
+/// Represents a memory address, which holds both the pointer
+/// or the parent pointer and the offset to it.
+pub const Memory = struct {
+ pointer: u32,
+ offset: u32,
+};
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index a5d4630378..b047e4b68a 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -345,10 +345,19 @@ pub fn updateLocalSymbolCode(self: *Wasm, decl: *Module.Decl, symbol_index: u32,
/// For a given decl, find the given symbol index's atom, and create a relocation for the type.
/// Returns the given pointer address
-pub fn getDeclVAddr(self: *Wasm, decl: *Module.Decl, ty: Type, symbol_index: u32, target_symbol_index: u32, offset: u32) !u32 {
+pub fn getDeclVAddr(
+ self: *Wasm,
+ decl: *Module.Decl,
+ ty: Type,
+ symbol_index: u32,
+ target_symbol_index: u32,
+ offset: u32,
+ addend: u32,
+) !u32 {
const atom = decl.link.wasm.symbolAtom(symbol_index);
const is_wasm32 = self.base.options.target.cpu.arch == .wasm32;
if (ty.zigTypeTag() == .Fn) {
+ std.debug.assert(addend == 0); // addend not allowed for function relocations
// We found a function pointer, so add it to our table,
// as function pointers are not allowed to be stored inside the data section.
// They are instead stored in a function table which are called by index.
@@ -363,6 +372,7 @@ pub fn getDeclVAddr(self: *Wasm, decl: *Module.Decl, ty: Type, symbol_index: u32
.index = target_symbol_index,
.offset = offset,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64,
+ .addend = addend,
});
}
// we do not know the final address at this point,
From ae1e3c8f9bc86eeefb5a83233884a134f7b974f4 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Mon, 31 Jan 2022 21:12:30 +0100
Subject: [PATCH 0020/2031] wasm: Implement vector_init for array & structs
Implements the instruction `vector_init` for structs and arrays.
For arrays, it checks whether the element must be passed by reference or not.
When not, it can simply use the `offset` field of a store instruction to copy the values
into the array. When it is by reference, it will move the pointer by the element size, and then
perform a store operation. This ensures types like structs will be moved into the right position.
For structs we always move the pointer, as we currently cannot verify whether every field is
passed by value.
---
src/arch/wasm/CodeGen.zig | 116 +++++++++++++++++++++++++++++---------
1 file changed, 89 insertions(+), 27 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2d0cf57fd4..420fbdf4ab 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -44,7 +44,7 @@ const WValue = union(enum) {
memory_offset: struct {
/// The symbol of the parent pointer
pointer: u32,
- /// Offset will be set as 'addend' when relocating
+ /// Offset will be set as addend when relocating
offset: u32,
},
/// Represents a function pointer
@@ -606,7 +606,10 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
// means we must generate it from a constant.
const val = self.air.value(ref).?;
const ty = self.air.typeOf(ref);
- if (!ty.hasRuntimeBits() and !ty.isInt()) return WValue{ .none = {} };
+ if (!ty.hasRuntimeBits() and !ty.isInt()) {
+ gop.value_ptr.* = WValue{ .none = {} };
+ return gop.value_ptr.*;
+ }
// When we need to pass the value by reference (such as a struct), we will
// leverage `genTypedValue` to lower the constant to bytes and emit it
@@ -1644,6 +1647,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
+
// result must be stored in the stack and we return a pointer
// to the stack instead
if (self.return_value != .none) {
@@ -1653,7 +1657,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
try self.restoreStackPointer();
try self.addTag(.@"return");
- return .none;
+ return WValue{ .none = {} };
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -1793,11 +1797,10 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
const err_ty = ty.errorUnionSet();
const pl_ty = ty.errorUnionPayload();
if (!pl_ty.hasRuntimeBits()) {
- const err_val = try self.load(rhs, err_ty, 0);
- return self.store(lhs, err_val, err_ty, 0);
+ return self.store(lhs, rhs, err_ty, 0);
}
- return try self.memCopy(ty, lhs, rhs);
+ return self.memCopy(ty, lhs, rhs);
},
.Optional => {
if (ty.isPtrLikeOptional()) {
@@ -1812,7 +1815,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
return self.memCopy(ty, lhs, rhs);
},
.Struct, .Array, .Union => {
- return try self.memCopy(ty, lhs, rhs);
+ return self.memCopy(ty, lhs, rhs);
},
.Pointer => {
if (ty.isSlice()) {
@@ -1827,7 +1830,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
}
},
.Int => if (ty.intInfo(self.target).bits > 64) {
- return try self.memCopy(ty, lhs, rhs);
+ return self.memCopy(ty, lhs, rhs);
},
else => {},
}
@@ -2587,11 +2590,11 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue
if (isByRef(payload_ty, self.target)) {
return self.buildPointerOffset(operand, offset, .new);
}
- return try self.load(operand, payload_ty, offset);
+ return self.load(operand, payload_ty, offset);
}
fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- if (self.liveness.isUnused(inst)) return WValue.none;
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -2601,11 +2604,12 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return operand;
}
- return try self.load(operand, err_ty.errorUnionSet(), 0);
+ return self.load(operand, err_ty.errorUnionSet(), 0);
}
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- if (self.liveness.isUnused(inst)) return WValue.none;
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
+
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -2627,11 +2631,14 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- if (self.liveness.isUnused(inst)) return WValue.none;
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
+
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_ty = self.air.getRefType(ty_op.ty);
+ if (!err_ty.errorUnionPayload().hasRuntimeBits()) return operand;
+
const err_union = try self.allocStack(err_ty);
// TODO: Also write 'undefined' to the payload
try self.store(err_union, operand, err_ty.errorUnionSet(), 0);
@@ -2813,16 +2820,16 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- if (self.liveness.isUnused(inst)) return WValue.none;
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return try self.load(operand, Type.usize, self.ptrSize());
+ return self.load(operand, Type.usize, self.ptrSize());
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- if (self.liveness.isUnused(inst)) return WValue.none;
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -2847,7 +2854,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
- return try self.load(result, elem_ty, 0);
+ return self.load(result, elem_ty, 0);
}
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2875,10 +2882,10 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- if (self.liveness.isUnused(inst)) return WValue.none;
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return try self.load(operand, Type.usize, 0);
+ return self.load(operand, Type.usize, 0);
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2943,7 +2950,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- return try self.resolveInst(un_op);
+ return self.resolveInst(un_op);
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2975,7 +2982,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const un_op = self.air.instructions.items(.data)[inst].un_op;
- return try self.resolveInst(un_op);
+ return self.resolveInst(un_op);
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2990,7 +2997,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
- const ptr_local = try self.load(pointer, ptr_ty, 0);
+ const ptr_local = try self.load(pointer, Type.usize, 0);
try self.addLabel(.local_get, ptr_local.local);
} else {
try self.emitWValue(pointer);
@@ -3007,7 +3014,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
- return try self.load(result, elem_ty, 0);
+ return self.load(result, elem_ty, 0);
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3023,7 +3030,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
- const ptr_local = try self.load(ptr, ptr_ty, 0);
+ const ptr_local = try self.load(ptr, Type.usize, 0);
try self.addLabel(.local_get, ptr_local.local);
} else {
try self.emitWValue(ptr);
@@ -3157,7 +3164,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
- return try self.load(result, elem_ty, 0);
+ return self.load(result, elem_ty, 0);
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3201,8 +3208,63 @@ fn airVectorInit(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
- _ = elements;
- return self.fail("TODO: Wasm backend: implement airVectorInit", .{});
+ switch (vector_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO: Wasm backend: implement airVectorInit for vectors", .{}),
+ .Array => {
+ const result = try self.allocStack(vector_ty);
+ const elem_ty = vector_ty.childType();
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target));
+
+ // When the element type is by reference, we must copy the entire
+ // value. It is therefore safer to move the offset pointer and store
+ // each value individually, instead of using store offsets.
+ if (isByRef(elem_ty, self.target)) {
+ // copy stack pointer into a temporary local, which is
+ // moved for each element to store each value in the right position.
+ const offset = try self.allocLocal(Type.usize);
+ try self.emitWValue(result);
+ try self.addLabel(.local_set, offset.local);
+ for (elements) |elem, elem_index| {
+ const elem_val = try self.resolveInst(elem);
+ try self.store(offset, elem_val, elem_ty, 0);
+
+ if (elem_index < elements.len - 1) {
+ _ = try self.buildPointerOffset(offset, elem_size, .modify);
+ }
+ }
+ } else {
+ var offset: u32 = 0;
+ for (elements) |elem| {
+ const elem_val = try self.resolveInst(elem);
+ try self.store(result, elem_val, elem_ty, offset);
+ offset += elem_size;
+ }
+ }
+ return result;
+ },
+ .Struct => {
+ const tuple = vector_ty.castTag(.tuple).?.data;
+ const result = try self.allocStack(vector_ty);
+ const offset = try self.allocLocal(Type.usize); // pointer to offset
+ try self.emitWValue(result);
+ try self.addLabel(.local_set, offset.local);
+ for (elements) |elem, elem_index| {
+ if (tuple.values[elem_index].tag() != .unreachable_value) continue;
+
+ const elem_ty = tuple.types[elem_index];
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target));
+ const value = try self.resolveInst(elem);
+ try self.store(offset, value, elem_ty, 0);
+
+ if (elem_index < elements.len - 1) {
+ _ = try self.buildPointerOffset(offset, elem_size, .modify);
+ }
+ }
+
+ return result;
+ },
+ else => unreachable,
+ }
}
fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
From e35414bf5c356798f201be85303101f59220326c Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 3 Feb 2022 21:31:35 +0100
Subject: [PATCH 0021/2031] wasm: Refactor stack to account for alignment
We now calculate the total stack size required for the current frame.
The default alignment of the stack is 16 bytes, and will be overwritten when the alignment
of a given type is larger than that.
After we have generated all instructions for the body, we calculate the total stack size
by forward aligning the stack size while accounting for the max alignment.
We then insert a prologue into the body, where we subtract this size from the stack pointer
and save it inside a bottom stackframe local. We use this local then, to calculate
the stack pointer locals of all variables we allocate into the stack.
In a future iteration we can improve this further by storing the offsets as a new `stack_offset` `WValue`.
This has the benefit of not having to spend runtime cost of storing those offsets, but instead we append
those offsets whenever we need the value that lives in the stack.
---
src/arch/wasm/CodeGen.zig | 168 +++++++++++++++++++++++++-------------
1 file changed, 110 insertions(+), 58 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 420fbdf4ab..d2db5fd92b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -560,6 +560,9 @@ mir_extra: std.ArrayListUnmanaged(u32) = .{},
/// When a function is executing, we store the the current stack pointer's value within this local.
/// This value is then used to restore the stack pointer to the original value at the return of the function.
initial_stack_value: WValue = .none,
+/// The current stack pointer with the stack size subtracted from it. From this value, we will calculate
+/// all offsets of the stack values.
+bottom_stack_value: WValue = .none,
/// Arguments of this function declaration
/// This will be set after `resolveCallingConventionValues`
args: []WValue = &.{},
@@ -567,6 +570,14 @@ args: []WValue = &.{},
/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
/// before this function returns its execution to the caller.
return_value: WValue = .none,
+/// The size of the stack this function occupies. In the function prologue
+/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`.
+stack_size: u32 = 0,
+/// The stack alignment, which is 16 bytes by default. This is specified by the
+/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
+/// and also what the llvm backend will emit.
+/// However, local variables or the usage of `@setAlignStack` can overwrite this default.
+stack_alignment: u32 = 16,
const InnerError = error{
OutOfMemory,
@@ -654,13 +665,6 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!void {
try self.mir_instructions.append(self.gpa, inst);
}
-/// Inserts a Mir instruction at the given `offset`.
-/// Asserts offset is within bound.
-fn addInstAt(self: *Self, offset: usize, inst: Mir.Inst) error{OutOfMemory}!void {
- try self.mir_instructions.ensureUnusedCapacity(self.gpa, 1);
- self.mir_instructions.insertAssumeCapacity(offset, inst);
-}
-
fn addTag(self: *Self, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
try self.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
}
@@ -845,10 +849,43 @@ pub fn genFunc(self: *Self) InnerError!void {
try self.addTag(.@"unreachable");
}
}
-
// End of function body
try self.addTag(.end);
+ // check if we have to initialize and allocate anything into the stack frame.
+ // If so, create enough stack space and insert the instructions at the front of the list.
+ if (self.stack_size > 0) {
+ var prologue = std.ArrayList(Mir.Inst).init(self.gpa);
+ defer prologue.deinit();
+
+ // load stack pointer
+ try prologue.append(.{ .tag = .global_get, .data = .{ .label = 0 } });
+ // store stack pointer so we can restore it when we return from the function
+ try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.initial_stack_value.local } });
+ // get the total stack size
+ const aligned_stack = std.mem.alignForwardGeneric(u32, self.stack_size, self.stack_alignment);
+ try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
+ // subtract it from the current stack pointer
+ try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
+ // Get negative stack alignment
+ try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, self.stack_alignment) * -1 } });
+ // Bit and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
+ try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
+ // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
+ try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.bottom_stack_value.local } });
+ // Store the current stack pointer value into the global stack pointer so other function calls will
+ // start from this value instead and not overwrite the current stack.
+ try prologue.append(.{ .tag = .global_set, .data = .{ .label = 0 } });
+
+ // reserve space and insert all prologue instructions at the front of the instruction list
+ // We insert them in reverse order as there is no insertSlice in multiArrayList.
+ try self.mir_instructions.ensureUnusedCapacity(self.gpa, prologue.items.len);
+ for (prologue.items) |_, index| {
+ const inst = prologue.items[prologue.items.len - 1 - index];
+ self.mir_instructions.insertAssumeCapacity(0, inst);
+ }
+ }
+
var mir: Mir = .{
.instructions = self.mir_instructions.toOwnedSlice(),
.extra = self.mir_extra.toOwnedSlice(self.gpa),
@@ -1137,7 +1174,7 @@ pub const DeclGen = struct {
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
- return self.lowerDeclRefValue(ty, val, decl, writer, 0);
+ return self.lowerDeclRefValue(ty, val, decl, 0);
},
.slice => {
const slice = val.castTag(.slice).?.data;
@@ -1161,9 +1198,9 @@ pub const DeclGen = struct {
const elem_ptr = val.castTag(.elem_ptr).?.data;
const elem_size = ty.childType().abiSize(self.target());
const offset = elem_ptr.index * elem_size;
- return self.lowerParentPtr(elem_ptr.array_ptr, writer, offset);
+ return self.lowerParentPtr(elem_ptr.array_ptr, offset);
},
- .int_u64 => return self.genTypedValue(Type.usize, val, writer),
+ .int_u64 => return self.genTypedValue(Type.usize, val),
else => return self.fail("TODO: Implement zig decl gen for pointer type value: '{s}'", .{@tagName(val.tag())}),
},
.ErrorUnion => {
@@ -1309,22 +1346,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
return result;
}
-/// Retrieves the stack pointer's value from the global variable and stores
-/// it in a local
+/// Creates a local for the initial stack value
/// Asserts `initial_stack_value` is `.none`
fn initializeStack(self: *Self) !void {
assert(self.initial_stack_value == .none);
- // reserve space for immediate value
- // get stack pointer global
- try self.addLabel(.global_get, 0);
-
// Reserve a local to store the current stack pointer
// We can later use this local to set the stack pointer back to the value
// we have stored here.
- self.initial_stack_value = try self.allocLocal(Type.initTag(.i32));
-
- // save the value to the local
- try self.addLabel(.local_set, self.initial_stack_value.local);
+ self.initial_stack_value = try self.allocLocal(Type.usize);
+ // Also reserve a local to store the bottom stack value
+ self.bottom_stack_value = try self.allocLocal(Type.usize);
}
/// Reads the stack pointer from `Context.initial_stack_value` and writes it
@@ -1339,36 +1370,75 @@ fn restoreStackPointer(self: *Self) !void {
try self.addLabel(.global_set, 0);
}
-/// Moves the stack pointer by given `offset`
-/// It does this by retrieving the stack pointer, subtracting `offset` and storing
-/// the result back into the stack pointer.
-fn moveStack(self: *Self, offset: u32, local: u32) !void {
- if (offset == 0) return;
- try self.addLabel(.global_get, 0);
- try self.addImm32(@bitCast(i32, offset));
- try self.addTag(.i32_sub);
- try self.addLabel(.local_tee, local);
- try self.addLabel(.global_set, 0);
+/// Saves the current stack size's stack pointer position into a given local
+/// It does this by retrieving the bottom stack pointer, adding `self.stack_size` and storing
+/// the result back into the local.
+fn saveStack(self: *Self) !WValue {
+ const local = try self.allocLocal(Type.usize);
+ try self.addLabel(.local_get, self.bottom_stack_value.local);
+ try self.addImm32(@intCast(i32, self.stack_size));
+ try self.addTag(.i32_add);
+ try self.addLabel(.local_set, local.local);
+ return local;
}
/// From a given type, will create space on the virtual stack to store the value of such type.
/// This returns a `WValue` with its active tag set to `local`, containing the index to the local
/// that points to the position on the virtual stack. This function should be used instead of
-/// moveStack unless a local was already created to store the point.
+/// moveStack unless a local was already created to store the pointer.
///
/// Asserts Type has codegenbits
fn allocStack(self: *Self, ty: Type) !WValue {
assert(ty.hasRuntimeBits());
+ if (self.initial_stack_value == .none) {
+ try self.initializeStack();
+ }
- // calculate needed stack space
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch {
- return self.fail("Given type '{}' too big to fit into stack frame", .{ty});
+ return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ ty, ty.abiSize(self.target) });
};
+ const abi_align = ty.abiAlignment(self.target);
- // allocate a local using wasm's pointer size
- const local = try self.allocLocal(Type.@"usize");
- try self.moveStack(abi_size, local.local);
- return local;
+ if (abi_align > self.stack_alignment) {
+ self.stack_alignment = abi_align;
+ }
+
+ const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_align);
+ defer self.stack_size = offset + abi_size;
+
+ // store the stack pointer and return a local to it
+ return self.saveStack();
+}
+
+/// From a given AIR instruction generates a pointer to the stack where
+/// the value of its type will live.
+/// This is different from allocStack where this will use the pointer's alignment
+/// if it is set, to ensure the stack alignment will be set correctly.
+fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue {
+ const ptr_ty = self.air.typeOfIndex(inst);
+ const pointee_ty = ptr_ty.childType();
+
+ if (self.initial_stack_value == .none) {
+ try self.initializeStack();
+ }
+
+ if (!pointee_ty.hasRuntimeBits()) {
+ return self.allocStack(Type.usize); // create a value containing just the stack pointer.
+ }
+
+ const abi_alignment = ptr_ty.ptrAlignment(self.target);
+ const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) catch {
+ return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ pointee_ty, pointee_ty.abiSize(self.target) });
+ };
+ if (abi_alignment > self.stack_alignment) {
+ self.stack_alignment = abi_alignment;
+ }
+
+ const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_alignment);
+ defer self.stack_size = offset + abi_size;
+
+ // store the stack pointer and return a local to it
+ return self.saveStack();
}
/// From given zig bitsize, returns the wasm bitsize
@@ -1667,12 +1737,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(child_type, self.target)) {
return self.return_value;
}
-
- // Initialize the stack
- if (self.initial_stack_value == .none) {
- try self.initializeStack();
- }
- return self.allocStack(child_type);
+ return self.allocStackPtr(inst);
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -1764,20 +1829,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- const pointee_type = self.air.typeOfIndex(inst).childType();
-
- // Initialize the stack
- if (self.initial_stack_value == .none) {
- try self.initializeStack();
- }
-
- if (!pointee_type.hasRuntimeBits()) {
- // when the pointee is zero-sized, we still want to create a pointer.
- // but instead use a default pointer type as storage.
- const zero_ptr = try self.allocStack(Type.usize);
- return zero_ptr;
- }
- return self.allocStack(pointee_type);
+ return self.allocStackPtr(inst);
}
fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
From 588b88b98753f02061e562a9c15c2396bcd95dee Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 3 Feb 2022 22:25:46 +0100
Subject: [PATCH 0022/2031] Move passing behavior tests
Singular tests (such as in the bug ones) are moved to top level with exclusions for non-passing backends.
The big behavior tests such as array_llvm and slice are moved to the inner scope with the C backend disabled.
They all pass for the wasm backend now
---
src/arch/wasm/CodeGen.zig | 2 +-
test/behavior.zig | 12 ++++++------
test/behavior/array_llvm.zig | 18 ++++++++++++++++++
test/behavior/bugs/1025.zig | 4 ++++
test/behavior/bugs/1741.zig | 3 +++
test/behavior/bugs/1914.zig | 7 +++++++
test/behavior/slice.zig | 13 +++++++++++++
7 files changed, 52 insertions(+), 7 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d2db5fd92b..67aa9a6c88 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1198,7 +1198,7 @@ pub const DeclGen = struct {
const elem_ptr = val.castTag(.elem_ptr).?.data;
const elem_size = ty.childType().abiSize(self.target());
const offset = elem_ptr.index * elem_size;
- return self.lowerParentPtr(elem_ptr.array_ptr, offset);
+ return self.lowerParentPtr(elem_ptr.array_ptr, @intCast(usize, offset));
},
.int_u64 => return self.genTypedValue(Type.usize, val),
else => return self.fail("TODO: Implement zig decl gen for pointer type value: '{s}'", .{@tagName(val.tag())}),
diff --git a/test/behavior.zig b/test/behavior.zig
index a0db9b9f57..0f74ed7d59 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -10,6 +10,7 @@ test {
_ = @import("behavior/bugs/655.zig");
_ = @import("behavior/bugs/656.zig");
_ = @import("behavior/bugs/679.zig");
+ _ = @import("behavior/bugs/1025.zig");
_ = @import("behavior/bugs/1111.zig");
_ = @import("behavior/bugs/1277.zig");
_ = @import("behavior/bugs/1310.zig");
@@ -17,6 +18,8 @@ test {
_ = @import("behavior/bugs/1486.zig");
_ = @import("behavior/bugs/1500.zig");
_ = @import("behavior/bugs/1735.zig");
+ _ = @import("behavior/bugs/1741.zig");
+ _ = @import("behavior/bugs/1914.zig");
_ = @import("behavior/bugs/2006.zig");
_ = @import("behavior/bugs/2346.zig");
_ = @import("behavior/bugs/3112.zig");
@@ -38,7 +41,8 @@ test {
_ = @import("behavior/struct.zig");
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
- // Tests that pass for stage1, llvm backend, C backend, wasm backend.
+ // Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
+ _ = @import("behavior/array_llvm.zig");
_ = @import("behavior/basic.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bugs/624.zig");
@@ -69,6 +73,7 @@ test {
_ = @import("behavior/pointers.zig");
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
+ _ = @import("behavior/slice.zig");
_ = @import("behavior/src.zig");
_ = @import("behavior/this.zig");
_ = @import("behavior/try.zig");
@@ -88,11 +93,7 @@ test {
if (builtin.zig_backend != .stage2_c) {
// Tests that pass for stage1 and the llvm backend.
- _ = @import("behavior/array_llvm.zig");
_ = @import("behavior/atomics.zig");
- _ = @import("behavior/bugs/1025.zig");
- _ = @import("behavior/bugs/1741.zig");
- _ = @import("behavior/bugs/1914.zig");
_ = @import("behavior/bugs/2578.zig");
_ = @import("behavior/bugs/3007.zig");
_ = @import("behavior/bugs/9584.zig");
@@ -108,7 +109,6 @@ test {
_ = @import("behavior/popcount.zig");
_ = @import("behavior/saturating_arithmetic.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
- _ = @import("behavior/slice.zig");
_ = @import("behavior/struct_llvm.zig");
_ = @import("behavior/switch.zig");
_ = @import("behavior/widening.zig");
diff --git a/test/behavior/array_llvm.zig b/test/behavior/array_llvm.zig
index 5be5974fff..c3df5ba837 100644
--- a/test/behavior/array_llvm.zig
+++ b/test/behavior/array_llvm.zig
@@ -7,6 +7,7 @@ var s_array: [8]Sub = undefined;
const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var s = Str{ .a = s_array[0..] };
s.a[0].b = 1;
@@ -19,6 +20,7 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try expect(storage[0].term == 1);
@@ -36,6 +38,7 @@ test "read/write through global variable array of struct fields initialized via
}
test "implicit cast single-item pointer" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testImplicitCastSingleItemPtr();
comptime try testImplicitCastSingleItemPtr();
}
@@ -52,6 +55,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const arr = [_]u8{ 1, 2 };
const x = comptime testArrayByValAtComptime(arr);
const y = comptime testArrayByValAtComptime(arr);
@@ -60,12 +64,14 @@ test "comptime evaluating function that takes array by value" {
}
test "runtime initialize array elem and then implicit cast to slice" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var two: i32 = 2;
const x: []const i32 = &[_]i32{two};
try expect(x[0] == 2);
}
test "array literal as argument to function" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn entry(two: i32) !void {
try foo(&[_]i32{ 1, 2, 3 });
@@ -90,6 +96,7 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn entry(two: i32) !void {
const cases = [_][]const []const i32{
@@ -147,6 +154,7 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
const Foo = struct {
a: usize = 2,
@@ -168,6 +176,7 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var array: [4:0]u8 = .{ 'a', 'o', 'e', 'u' };
@@ -181,6 +190,7 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var array = [_]u8{ 0x55, 0xAA };
@@ -196,6 +206,8 @@ test "type deduction for array subscript expression" {
test "sentinel element count towards the ABI size calculation" {
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
@@ -218,6 +230,8 @@ test "sentinel element count towards the ABI size calculation" {
test "zero-sized array with recursive type definition" {
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const U = struct {
fn foo(comptime T: type, comptime n: usize) type {
@@ -237,6 +251,7 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
const U = union {
a: u32,
@@ -253,6 +268,7 @@ test "type coercion of anon struct literal to array" {
try expect(arr1[2] == 54);
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
var x2: U = .{ .a = 42 };
const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } };
@@ -268,6 +284,8 @@ test "type coercion of anon struct literal to array" {
test "type coercion of pointer to anon struct literal to pointer to array" {
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
const U = union {
diff --git a/test/behavior/bugs/1025.zig b/test/behavior/bugs/1025.zig
index 69ee77eea1..fa72e522de 100644
--- a/test/behavior/bugs/1025.zig
+++ b/test/behavior/bugs/1025.zig
@@ -1,3 +1,5 @@
+const builtin = @import("builtin");
+
const A = struct {
B: type,
};
@@ -7,6 +9,8 @@ fn getA() A {
}
test "bug 1025" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a = getA();
try @import("std").testing.expect(a.B == u8);
}
diff --git a/test/behavior/bugs/1741.zig b/test/behavior/bugs/1741.zig
index 8873de9b49..280aafc52e 100644
--- a/test/behavior/bugs/1741.zig
+++ b/test/behavior/bugs/1741.zig
@@ -1,6 +1,9 @@
const std = @import("std");
+const builtin = @import("builtin");
test "fixed" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const x: f32 align(128) = 12.34;
try std.testing.expect(@ptrToInt(&x) % 128 == 0);
}
diff --git a/test/behavior/bugs/1914.zig b/test/behavior/bugs/1914.zig
index 2c9e836e6a..6462937351 100644
--- a/test/behavior/bugs/1914.zig
+++ b/test/behavior/bugs/1914.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const builtin = @import("builtin");
const A = struct {
b_list_pointer: *const []B,
@@ -11,6 +12,9 @@ const b_list: []B = &[_]B{};
const a = A{ .b_list_pointer = &b_list };
test "segfault bug" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const assert = std.debug.assert;
const obj = B{ .a_pointer = &a };
assert(obj.a_pointer == &a); // this makes zig crash
@@ -27,5 +31,8 @@ pub const B2 = struct {
var b_value = B2{ .pointer_array = &[_]*A2{} };
test "basic stuff" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
std.debug.assert(&b_value == &b_value);
}
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 01ae10ee4e..0b01139800 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -27,6 +27,7 @@ comptime {
}
test "slicing" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var array: [20]i32 = undefined;
array[5] = 1234;
@@ -43,6 +44,7 @@ test "slicing" {
}
test "const slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
const a = "1234567890";
try expect(a.len == 10);
@@ -53,6 +55,7 @@ test "const slice" {
}
test "comptime slice of undefined pointer of length 0" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const slice1 = @as([*]i32, undefined)[0..0];
try expect(slice1.len == 0);
const slice2 = @as([*]i32, undefined)[100..100];
@@ -60,6 +63,7 @@ test "comptime slice of undefined pointer of length 0" {
}
test "implicitly cast array of size 0 to slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var msg = [_]u8{};
try assertLenIsZero(&msg);
}
@@ -69,6 +73,7 @@ fn assertLenIsZero(msg: []const u8) !void {
}
test "access len index of sentinel-terminated slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var slice: [:0]const u8 = "hello";
@@ -82,6 +87,7 @@ test "access len index of sentinel-terminated slice" {
}
test "comptime slice of slice preserves comptime var" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
var buff: [10]u8 = undefined;
buff[0..][0..][0] = 1;
@@ -90,6 +96,7 @@ test "comptime slice of slice preserves comptime var" {
}
test "slice of type" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
var types_array = [_]type{ i32, f64, type };
for (types_array) |T, i| {
@@ -112,6 +119,7 @@ test "slice of type" {
}
test "generic malloc free" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const a = memAlloc(u8, 10) catch unreachable;
memFree(u8, a);
}
@@ -124,6 +132,7 @@ fn memFree(comptime T: type, memory: []T) void {
}
test "slice of hardcoded address to pointer" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
const pointer = @intToPtr([*]u8, 0x04)[0..2];
@@ -138,6 +147,7 @@ test "slice of hardcoded address to pointer" {
}
test "comptime slice of pointer preserves comptime var" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
var buff: [10]u8 = undefined;
var a = @ptrCast([*]u8, &buff);
@@ -147,6 +157,7 @@ test "comptime slice of pointer preserves comptime var" {
}
test "comptime pointer cast array and then slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
const ptrA: [*]const u8 = @ptrCast([*]const u8, &array);
@@ -160,6 +171,7 @@ test "comptime pointer cast array and then slice" {
}
test "slicing zero length array" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const s1 = ""[0..];
const s2 = ([_]u32{})[0..];
try expect(s1.len == 0);
@@ -171,6 +183,7 @@ test "slicing zero length array" {
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(@ptrToInt(x) == 0x1000);
From 4ca9a8d192f4c800f10cdb3bd39c94922b6fb9b8 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Thu, 3 Feb 2022 19:19:48 +0100
Subject: [PATCH 0023/2031] x64: implement storing to MCValue.memory for PIE
targets
---
src/arch/x86_64/CodeGen.zig | 28 ++++++++++++++++++++--------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e05a66228b..b3a292a7f4 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -1811,17 +1811,29 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
}
},
.memory => |addr| {
- if (self.bin_file.options.pie) {
- return self.fail("TODO implement storing to memory when targeting PIE", .{});
- }
-
- // TODO: in case the address fits in an imm32 we can use [ds:imm32]
- // instead of wasting an instruction copying the address to a register
-
value.freezeIfRegister(&self.register_manager);
defer value.unfreezeIfRegister(&self.register_manager);
- const addr_reg = try self.copyToTmpRegister(ptr_ty, .{ .immediate = addr });
+ const addr_reg: Register = blk: {
+ if (self.bin_file.options.pie) {
+ const addr_reg = try self.register_manager.allocReg(null);
+ _ = try self.addInst(.{
+ .tag = .lea,
+ .ops = (Mir.Ops{
+ .reg1 = addr_reg.to64(),
+ .flags = 0b10,
+ }).encode(),
+ .data = .{ .got_entry = @truncate(u32, addr) },
+ });
+ break :blk addr_reg;
+ } else {
+ // TODO: in case the address fits in an imm32 we can use [ds:imm32]
+ // instead of wasting an instruction copying the address to a register
+ const addr_reg = try self.copyToTmpRegister(ptr_ty, .{ .immediate = addr });
+ break :blk addr_reg;
+ }
+ };
+
// to get the actual address of the value we want to modify we have to go through the GOT
// mov reg, [reg]
_ = try self.addInst(.{
From 0893326e0ea9b261c5d334067c294c7d8972d5a1 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 3 Feb 2022 21:05:10 -0700
Subject: [PATCH 0024/2031] Sema: slice improvements
* resolve_inferred_alloc now gives a proper mutability attribute to the
corresponding alloc instruction. Previously, it would fail to mark
things const.
* slicing: fix the detection for when the end index equals the length
of the underlying object. Previously it was using `end - start` but
it should just use the end index directly. It also takes into account
when slicing a comptime-known slice.
* `Type.sentinel`: fix not handling all slice tags
---
src/Sema.zig | 98 ++++++++++++++++++++++++----------
src/type.zig | 3 ++
test/behavior/slice.zig | 12 +++++
test/behavior/slice_stage1.zig | 12 -----
4 files changed, 85 insertions(+), 40 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 94b5a7f1d1..a3dccf1d7d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2503,6 +2503,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
try sema.requireRuntimeBlock(block, src);
try sema.resolveTypeLayout(block, ty_src, final_elem_ty);
+ const final_ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = final_elem_ty,
+ .mutable = var_is_mut,
+ .@"align" = inferred_alloc.data.alignment,
+ .@"addrspace" = target_util.defaultAddressSpace(target, .local),
+ });
+
if (var_is_mut) {
try sema.validateVarType(block, ty_src, final_elem_ty, false);
} else ct: {
@@ -2534,8 +2541,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct;
if (air_datas[bitcast_inst].ty_op.operand != Air.indexToRef(const_inst)) break :ct;
- const bitcast_ty_ref = air_datas[bitcast_inst].ty_op.ty;
-
const new_decl = d: {
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
@@ -2551,17 +2556,15 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
// block so that codegen does not see it.
block.instructions.shrinkRetainingCapacity(block.instructions.items.len - 3);
sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl);
- air_datas[ptr_inst].ty_pl.ty = bitcast_ty_ref;
+ // Would be nice if we could just assign `bitcast_ty_ref` to
+ // `air_datas[ptr_inst].ty_pl.ty`, wouldn't it? Alas, that is almost correct,
+ // except that the pointer is mutable and we need to make it constant here.
+ air_datas[ptr_inst].ty_pl.ty = try sema.addType(final_ptr_ty);
return;
}
// Change it to a normal alloc.
- const final_ptr_ty = try Type.ptr(sema.arena, .{
- .pointee_type = final_elem_ty,
- .@"align" = inferred_alloc.data.alignment,
- .@"addrspace" = target_util.defaultAddressSpace(target, .local),
- });
sema.air_instructions.set(ptr_inst, .{
.tag = .alloc,
.data = .{ .ty = final_ptr_ty },
@@ -15609,12 +15612,16 @@ fn analyzeSlice(
var slice_ty = ptr_ptr_ty;
var ptr_or_slice = ptr_ptr;
var elem_ty = ptr_ptr_child_ty.childType();
+ var ptr_sentinel: ?Value = null;
switch (ptr_ptr_child_ty.zigTypeTag()) {
- .Array => {},
+ .Array => {
+ ptr_sentinel = ptr_ptr_child_ty.sentinel();
+ },
.Pointer => switch (ptr_ptr_child_ty.ptrSize()) {
.One => {
const double_child_ty = ptr_ptr_child_ty.childType();
if (double_child_ty.zigTypeTag() == .Array) {
+ ptr_sentinel = double_child_ty.sentinel();
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = double_child_ty;
@@ -15624,12 +15631,14 @@ fn analyzeSlice(
}
},
.Many, .C => {
+ ptr_sentinel = ptr_ptr_child_ty.sentinel();
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = ptr_ptr_child_ty;
elem_ty = ptr_ptr_child_ty.childType();
},
.Slice => {
+ ptr_sentinel = ptr_ptr_child_ty.sentinel();
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = ptr_ptr_child_ty;
@@ -15647,29 +15656,67 @@ fn analyzeSlice(
const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
const new_ptr = try analyzePtrArithmetic(sema, block, src, ptr, start, .ptr_add, ptr_src, start_src);
+ // true if and only if the end index of the slice, implicitly or explicitly, equals
+ // the length of the underlying object being sliced. we might learn the length of the
+ // underlying object because it is an array (which has the length in the type), or
+ // we might learn of the length because it is a comptime-known slice value.
+ var end_is_len = uncasted_end_opt == .none;
const end = e: {
- if (uncasted_end_opt != .none) {
- break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
- }
-
if (array_ty.zigTypeTag() == .Array) {
- break :e try sema.addConstant(
- Type.usize,
- try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
- );
+ const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen());
+
+ if (!end_is_len) {
+ const end = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ if (try sema.resolveMaybeUndefVal(block, end_src, end)) |end_val| {
+ if (end_val.eql(len_val, Type.usize)) {
+ end_is_len = true;
+ }
+ }
+ break :e end;
+ }
+
+ break :e try sema.addConstant(Type.usize, len_val);
} else if (slice_ty.isSlice()) {
+ if (!end_is_len) {
+ const end = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
+ if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
+ var int_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = slice_val.sliceLen(),
+ };
+ const slice_len_val = Value.initPayload(&int_payload.base);
+ if (end_val.eql(slice_len_val, Type.usize)) {
+ end_is_len = true;
+ }
+ }
+ }
+ break :e end;
+ }
break :e try sema.analyzeSliceLen(block, src, ptr_or_slice);
}
+ if (!end_is_len) {
+ break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ }
return sema.fail(block, end_src, "slice of pointer must include end value", .{});
};
- const slice_sentinel = if (sentinel_opt != .none) blk: {
- const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
- break :blk try sema.resolveConstValue(block, sentinel_src, casted);
- } else null;
+ const sentinel = s: {
+ if (sentinel_opt != .none) {
+ const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
+ break :s try sema.resolveConstValue(block, sentinel_src, casted);
+ }
+ // If we are slicing to the end of something that is sentinel-terminated
+ // then the resulting slice type is also sentinel-terminated.
+ if (end_is_len) {
+ if (ptr_sentinel) |sent| {
+ break :s sent;
+ }
+ }
+ break :s null;
+ };
const new_len = try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src);
-
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data;
@@ -15678,11 +15725,6 @@ fn analyzeSlice(
if (opt_new_len_val) |new_len_val| {
const new_len_int = new_len_val.toUnsignedInt();
- const sentinel = if (array_ty.zigTypeTag() == .Array and new_len_int == array_ty.arrayLen())
- array_ty.sentinel()
- else
- slice_sentinel;
-
const return_ty = try Type.ptr(sema.arena, .{
.pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty),
.sentinel = null,
@@ -15713,7 +15755,7 @@ fn analyzeSlice(
const return_ty = try Type.ptr(sema.arena, .{
.pointee_type = elem_ty,
- .sentinel = slice_sentinel,
+ .sentinel = sentinel,
.@"align" = new_ptr_ty_info.@"align",
.@"addrspace" = new_ptr_ty_info.@"addrspace",
.mutable = new_ptr_ty_info.mutable,
diff --git a/src/type.zig b/src/type.zig
index 5632629bff..272d09a921 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -3042,6 +3042,9 @@ pub const Type = extern union {
.array_u8,
.manyptr_u8,
.manyptr_const_u8,
+ .const_slice_u8,
+ .const_slice,
+ .mut_slice,
=> return null,
.pointer => return self.castTag(.pointer).?.data.sentinel,
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 0b01139800..d4e8284751 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -192,3 +192,15 @@ test "compile time slice of pointer to hard coded address" {
try expect(@ptrToInt(y) == 0x1400);
try expect(y.len == 0x400);
}
+
+test "slice string literal has correct type" {
+ comptime {
+ try expect(@TypeOf("aoeu"[0..]) == *const [4:0]u8);
+ const array = [_]i32{ 1, 2, 3, 4 };
+ try expect(@TypeOf(array[0..]) == *const [4]i32);
+ }
+ var runtime_zero: usize = 0;
+ comptime try expect(@TypeOf("aoeu"[runtime_zero..]) == [:0]const u8);
+ const array = [_]i32{ 1, 2, 3, 4 };
+ comptime try expect(@TypeOf(array[runtime_zero..]) == []const i32);
+}
diff --git a/test/behavior/slice_stage1.zig b/test/behavior/slice_stage1.zig
index 3df7a75e10..cb7c0f5223 100644
--- a/test/behavior/slice_stage1.zig
+++ b/test/behavior/slice_stage1.zig
@@ -4,18 +4,6 @@ const expectEqualSlices = std.testing.expectEqualSlices;
const expectEqual = std.testing.expectEqual;
const mem = std.mem;
-test "slice string literal has correct type" {
- comptime {
- try expect(@TypeOf("aoeu"[0..]) == *const [4:0]u8);
- const array = [_]i32{ 1, 2, 3, 4 };
- try expect(@TypeOf(array[0..]) == *const [4]i32);
- }
- var runtime_zero: usize = 0;
- comptime try expect(@TypeOf("aoeu"[runtime_zero..]) == [:0]const u8);
- const array = [_]i32{ 1, 2, 3, 4 };
- comptime try expect(@TypeOf(array[runtime_zero..]) == []const i32);
-}
-
test "result location zero sized array inside struct field implicit cast to slice" {
const E = struct {
entries: []u32,
From 1b6a1e691fab75ce40c9d0c4015c9f4a46b72aa4 Mon Sep 17 00:00:00 2001
From: Mateusz Radomski <33978857+m-radomski@users.noreply.github.com>
Date: Fri, 4 Feb 2022 06:58:27 +0100
Subject: [PATCH 0025/2031] Sema: check for NaNs in cmp (#10760)
---
src/Sema.zig | 7 +++++++
test/behavior/math.zig | 6 ++----
2 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index a3dccf1d7d..7666b2c9ec 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -15816,6 +15816,13 @@ fn cmpNumeric(
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(Type.bool);
}
+ if (lhs_val.isNan() or rhs_val.isNan()) {
+ if (op == std.math.CompareOperator.neq) {
+ return Air.Inst.Ref.bool_true;
+ } else {
+ return Air.Inst.Ref.bool_false;
+ }
+ }
if (Value.compareHetero(lhs_val, op, rhs_val)) {
return Air.Inst.Ref.bool_true;
} else {
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 77820ac0fb..8f947e2829 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -979,18 +979,16 @@ test "vector integer addition" {
}
test "NaN comparison" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
try testNanEqNan(f16);
try testNanEqNan(f32);
try testNanEqNan(f64);
try testNanEqNan(f128);
- if (has_f80_rt) try testNanEqNan(f80);
+ if (has_f80_rt and (builtin.zig_backend == .stage1)) try testNanEqNan(f80); // TODO
comptime try testNanEqNan(f16);
comptime try testNanEqNan(f32);
comptime try testNanEqNan(f64);
comptime try testNanEqNan(f128);
- // comptime try testNanEqNan(f80);
+ // comptime try testNanEqNan(f80); // TODO
}
fn testNanEqNan(comptime F: type) !void {
From 64f7231f86d4b8a155f48087b3f173d8e41b620c Mon Sep 17 00:00:00 2001
From: Kazuki Sakamoto
Date: Thu, 3 Feb 2022 21:12:36 -0800
Subject: [PATCH 0026/2031] stage1: Fix missing LLD library
---
cmake/Findlld.cmake | 1 +
1 file changed, 1 insertion(+)
diff --git a/cmake/Findlld.cmake b/cmake/Findlld.cmake
index 5b5fbcb468..8a46888531 100644
--- a/cmake/Findlld.cmake
+++ b/cmake/Findlld.cmake
@@ -49,6 +49,7 @@ else()
FIND_AND_ADD_LLD_LIB(lldELF)
FIND_AND_ADD_LLD_LIB(lldCOFF)
FIND_AND_ADD_LLD_LIB(lldWasm)
+ FIND_AND_ADD_LLD_LIB(lldMachO)
FIND_AND_ADD_LLD_LIB(lldReaderWriter)
FIND_AND_ADD_LLD_LIB(lldCore)
FIND_AND_ADD_LLD_LIB(lldYAML)
From 95fbce2b958395a367a82ce33170edd93e686173 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 3 Feb 2022 23:57:05 -0700
Subject: [PATCH 0027/2031] Sema: fixes to fieldVal, resolveStructFully,
Type.eql
fieldVal handles pointer to pointer to array. This can happen for
example, if a pointer to an array is used as the condition expression of
a for loop.
resolveStructFully handles tuples (by doing nothing).
Fixed Type comparison for tuples to handle comptime fields properly.
---
src/Sema.zig | 62 +++---
src/type.zig | 14 +-
test/behavior.zig | 1 -
test/behavior/slice.zig | 352 +++++++++++++++++++++++++++++++++
test/behavior/slice_stage1.zig | 347 --------------------------------
5 files changed, 405 insertions(+), 371 deletions(-)
delete mode 100644 test/behavior/slice_stage1.zig
diff --git a/src/Sema.zig b/src/Sema.zig
index 7666b2c9ec..c4b3ad8c33 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -12998,7 +12998,7 @@ fn fieldVal(
.Array => {
if (mem.eql(u8, field_name, "len")) {
return sema.addConstant(
- Type.initTag(.comptime_int),
+ Type.comptime_int,
try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()),
);
} else {
@@ -13010,26 +13010,43 @@ fn fieldVal(
);
}
},
- .Pointer => if (inner_ty.isSlice()) {
- if (mem.eql(u8, field_name, "ptr")) {
- const slice = if (is_pointer_to)
- try sema.analyzeLoad(block, src, object, object_src)
- else
- object;
- return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src);
- } else if (mem.eql(u8, field_name, "len")) {
- const slice = if (is_pointer_to)
- try sema.analyzeLoad(block, src, object, object_src)
- else
- object;
- return sema.analyzeSliceLen(block, src, slice);
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
+ .Pointer => {
+ const ptr_info = inner_ty.ptrInfo().data;
+ if (ptr_info.size == .Slice) {
+ if (mem.eql(u8, field_name, "ptr")) {
+ const slice = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+ return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src);
+ } else if (mem.eql(u8, field_name, "len")) {
+ const slice = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+ return sema.analyzeSliceLen(block, src, slice);
+ } else {
+ return sema.fail(
+ block,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, object_ty },
+ );
+ }
+ } else if (ptr_info.pointee_type.zigTypeTag() == .Array) {
+ if (mem.eql(u8, field_name, "len")) {
+ return sema.addConstant(
+ Type.comptime_int,
+ try Value.Tag.int_u64.create(arena, ptr_info.pointee_type.arrayLen()),
+ );
+ } else {
+ return sema.fail(
+ block,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, ptr_info.pointee_type },
+ );
+ }
}
},
.Type => {
@@ -16371,7 +16388,8 @@ fn resolveStructFully(
try resolveStructLayout(sema, block, src, ty);
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
- const struct_obj = resolved_ty.castTag(.@"struct").?.data;
+ const payload = resolved_ty.castTag(.@"struct") orelse return;
+ const struct_obj = payload.data;
switch (struct_obj.status) {
.none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
.fully_resolved_wip, .fully_resolved => return,
diff --git a/src/type.zig b/src/type.zig
index 272d09a921..e3a4b3d60a 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -634,7 +634,19 @@ pub const Type = extern union {
for (a_payload.data.values) |a_val, i| {
const ty = a_payload.data.types[i];
const b_val = b_payload.data.values[i];
- if (!Value.eql(a_val, b_val, ty)) return false;
+ if (a_val.tag() == .unreachable_value) {
+ if (b_val.tag() == .unreachable_value) {
+ continue;
+ } else {
+ return false;
+ }
+ } else {
+ if (b_val.tag() == .unreachable_value) {
+ return false;
+ } else {
+ if (!Value.eql(a_val, b_val, ty)) return false;
+ }
+ }
}
return true;
diff --git a/test/behavior.zig b/test/behavior.zig
index 0f74ed7d59..7b6cb6b402 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -163,7 +163,6 @@ test {
_ = @import("behavior/select.zig");
_ = @import("behavior/shuffle.zig");
_ = @import("behavior/sizeof_and_typeof_stage1.zig");
- _ = @import("behavior/slice_stage1.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
_ = @import("behavior/switch_prong_err_enum.zig");
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index d4e8284751..4ec5f11817 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -204,3 +204,355 @@ test "slice string literal has correct type" {
const array = [_]i32{ 1, 2, 3, 4 };
comptime try expect(@TypeOf(array[runtime_zero..]) == []const i32);
}
+
+test "result location zero sized array inside struct field implicit cast to slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+ const E = struct {
+ entries: []u32,
+ };
+ var foo = E{ .entries = &[_]u32{} };
+ try expect(foo.entries.len == 0);
+}
+
+test "runtime safety lets us slice from len..len" {
+ var an_array = [_]u8{ 1, 2, 3 };
+ try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
+}
+
+fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
+ return a_slice[start..end];
+}
+
+test "C pointer" {
+ var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
+ var len: u32 = 10;
+ var slice = buf[0..len];
+ try expect(mem.eql(u8, "kjdhfkjdhf", slice));
+}
+
+test "C pointer slice access" {
+ var buf: [10]u32 = [1]u32{42} ** 10;
+ const c_ptr = @ptrCast([*c]const u32, &buf);
+
+ var runtime_zero: usize = 0;
+ comptime try expectEqual([]const u32, @TypeOf(c_ptr[runtime_zero..1]));
+ comptime try expectEqual(*const [1]u32, @TypeOf(c_ptr[0..1]));
+
+ for (c_ptr[0..5]) |*cl| {
+ try expect(@as(u32, 42) == cl.*);
+ }
+}
+
+test "comptime slices are disambiguated" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try expect(sliceSum(&[_]u8{ 1, 2 }) == 3);
+ try expect(sliceSum(&[_]u8{ 3, 4 }) == 7);
+}
+
+fn sliceSum(comptime q: []const u8) i32 {
+ comptime var result = 0;
+ inline for (q) |item| {
+ result += item;
+ }
+ return result;
+}
+
+test "slice type with custom alignment" {
+ const LazilyResolvedType = struct {
+ anything: i32,
+ };
+ var slice: []align(32) LazilyResolvedType = undefined;
+ var array: [10]LazilyResolvedType align(32) = undefined;
+ slice = &array;
+ slice[1].anything = 42;
+ try expect(array[1].anything == 42);
+}
+
+test "obtaining a null terminated slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+ // here we have a normal array
+ var buf: [50]u8 = undefined;
+
+ buf[0] = 'a';
+ buf[1] = 'b';
+ buf[2] = 'c';
+ buf[3] = 0;
+
+ // now we obtain a null terminated slice:
+ const ptr = buf[0..3 :0];
+ _ = ptr;
+
+ var runtime_len: usize = 3;
+ const ptr2 = buf[0..runtime_len :0];
+ // ptr2 is a null-terminated slice
+ comptime try expect(@TypeOf(ptr2) == [:0]u8);
+ comptime try expect(@TypeOf(ptr2[0..2]) == *[2]u8);
+ var runtime_zero: usize = 0;
+ comptime try expect(@TypeOf(ptr2[runtime_zero..2]) == []u8);
+}
+
+test "empty array to slice" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ const empty: []align(16) u8 = &[_]u8{};
+ const align_1: []align(1) u8 = empty;
+ const align_4: []align(4) u8 = empty;
+ const align_16: []align(16) u8 = empty;
+ try expectEqual(1, @typeInfo(@TypeOf(align_1)).Pointer.alignment);
+ try expectEqual(4, @typeInfo(@TypeOf(align_4)).Pointer.alignment);
+ try expectEqual(16, @typeInfo(@TypeOf(align_16)).Pointer.alignment);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "@ptrCast slice to pointer" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff };
+ var slice: []u8 = &array;
+ var ptr = @ptrCast(*u16, slice);
+ try expect(ptr.* == 65535);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "slice syntax resulting in pointer-to-array" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ try testArray();
+ try testArrayZ();
+ try testArray0();
+ try testArrayAlign();
+ try testPointer();
+ try testPointerZ();
+ try testPointer0();
+ try testPointerAlign();
+ try testSlice();
+ try testSliceZ();
+ try testSlice0();
+ try testSliceOpt();
+ try testSliceAlign();
+ }
+
+ fn testArray() !void {
+ var array = [5]u8{ 1, 2, 3, 4, 5 };
+ var slice = array[1..3];
+ comptime try expect(@TypeOf(slice) == *[2]u8);
+ try expect(slice[0] == 2);
+ try expect(slice[1] == 3);
+ }
+
+ fn testArrayZ() !void {
+ var array = [5:0]u8{ 1, 2, 3, 4, 5 };
+ comptime try expect(@TypeOf(array[1..3]) == *[2]u8);
+ comptime try expect(@TypeOf(array[1..5]) == *[4:0]u8);
+ comptime try expect(@TypeOf(array[1..]) == *[4:0]u8);
+ comptime try expect(@TypeOf(array[1..3 :4]) == *[2:4]u8);
+ }
+
+ fn testArray0() !void {
+ {
+ var array = [0]u8{};
+ var slice = array[0..0];
+ comptime try expect(@TypeOf(slice) == *[0]u8);
+ }
+ {
+ var array = [0:0]u8{};
+ var slice = array[0..0];
+ comptime try expect(@TypeOf(slice) == *[0:0]u8);
+ try expect(slice[0] == 0);
+ }
+ }
+
+ fn testArrayAlign() !void {
+ var array align(4) = [5]u8{ 1, 2, 3, 4, 5 };
+ var slice = array[4..5];
+ comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
+ try expect(slice[0] == 5);
+ comptime try expect(@TypeOf(array[0..2]) == *align(4) [2]u8);
+ }
+
+ fn testPointer() !void {
+ var array = [5]u8{ 1, 2, 3, 4, 5 };
+ var pointer: [*]u8 = &array;
+ var slice = pointer[1..3];
+ comptime try expect(@TypeOf(slice) == *[2]u8);
+ try expect(slice[0] == 2);
+ try expect(slice[1] == 3);
+ }
+
+ fn testPointerZ() !void {
+ var array = [5:0]u8{ 1, 2, 3, 4, 5 };
+ var pointer: [*:0]u8 = &array;
+ comptime try expect(@TypeOf(pointer[1..3]) == *[2]u8);
+ comptime try expect(@TypeOf(pointer[1..3 :4]) == *[2:4]u8);
+ }
+
+ fn testPointer0() !void {
+ var pointer: [*]const u0 = &[1]u0{0};
+ var slice = pointer[0..1];
+ comptime try expect(@TypeOf(slice) == *const [1]u0);
+ try expect(slice[0] == 0);
+ }
+
+ fn testPointerAlign() !void {
+ var array align(4) = [5]u8{ 1, 2, 3, 4, 5 };
+ var pointer: [*]align(4) u8 = &array;
+ var slice = pointer[4..5];
+ comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
+ try expect(slice[0] == 5);
+ comptime try expect(@TypeOf(pointer[0..2]) == *align(4) [2]u8);
+ }
+
+ fn testSlice() !void {
+ var array = [5]u8{ 1, 2, 3, 4, 5 };
+ var src_slice: []u8 = &array;
+ var slice = src_slice[1..3];
+ comptime try expect(@TypeOf(slice) == *[2]u8);
+ try expect(slice[0] == 2);
+ try expect(slice[1] == 3);
+ }
+
+ fn testSliceZ() !void {
+ var array = [5:0]u8{ 1, 2, 3, 4, 5 };
+ var slice: [:0]u8 = &array;
+ comptime try expect(@TypeOf(slice[1..3]) == *[2]u8);
+ comptime try expect(@TypeOf(slice[1..]) == [:0]u8);
+ comptime try expect(@TypeOf(slice[1..3 :4]) == *[2:4]u8);
+ }
+
+ fn testSliceOpt() !void {
+ var array: [2]u8 = [2]u8{ 1, 2 };
+ var slice: ?[]u8 = &array;
+ comptime try expect(@TypeOf(&array, slice) == ?[]u8);
+ comptime try expect(@TypeOf(slice.?[0..2]) == *[2]u8);
+ }
+
+ fn testSlice0() !void {
+ {
+ var array = [0]u8{};
+ var src_slice: []u8 = &array;
+ var slice = src_slice[0..0];
+ comptime try expect(@TypeOf(slice) == *[0]u8);
+ }
+ {
+ var array = [0:0]u8{};
+ var src_slice: [:0]u8 = &array;
+ var slice = src_slice[0..0];
+ comptime try expect(@TypeOf(slice) == *[0]u8);
+ }
+ }
+
+ fn testSliceAlign() !void {
+ var array align(4) = [5]u8{ 1, 2, 3, 4, 5 };
+ var src_slice: []align(4) u8 = &array;
+ var slice = src_slice[4..5];
+ comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
+ try expect(slice[0] == 5);
+ comptime try expect(@TypeOf(src_slice[0..2]) == *align(4) [2]u8);
+ }
+
+ fn testConcatStrLiterals() !void {
+ try expectEqualSlices("a"[0..] ++ "b"[0..], "ab");
+ try expectEqualSlices("a"[0.. :0] ++ "b"[0.. :0], "ab");
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "type coercion of pointer to anon struct literal to pointer to slice" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const U = union {
+ a: u32,
+ b: bool,
+ c: []const u8,
+ };
+
+ fn doTheTest() !void {
+ var x1: u8 = 42;
+ const t1 = &.{ x1, 56, 54 };
+ var slice1: []const u8 = t1;
+ try expect(slice1.len == 3);
+ try expect(slice1[0] == 42);
+ try expect(slice1[1] == 56);
+ try expect(slice1[2] == 54);
+
+ var x2: []const u8 = "hello";
+ const t2 = &.{ x2, ", ", "world!" };
+ // @compileLog(@TypeOf(t2));
+ var slice2: []const []const u8 = t2;
+ try expect(slice2.len == 3);
+ try expect(mem.eql(u8, slice2[0], "hello"));
+ try expect(mem.eql(u8, slice2[1], ", "));
+ try expect(mem.eql(u8, slice2[2], "world!"));
+ }
+ };
+ // try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "array concat of slices gives slice" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ comptime {
+ var a: []const u8 = "aoeu";
+ var b: []const u8 = "asdf";
+ const c = a ++ b;
+ try expect(std.mem.eql(u8, c, "aoeuasdf"));
+ }
+}
+
+test "slice bounds in comptime concatenation" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const bs = comptime blk: {
+ const b = "........1........";
+ break :blk b[8..9];
+ };
+ const str = "" ++ bs;
+ try expect(str.len == 1);
+ try expect(std.mem.eql(u8, str, "1"));
+
+ const str2 = bs ++ "";
+ try expect(str2.len == 1);
+ try expect(std.mem.eql(u8, str2, "1"));
+}
+
+test "slice sentinel access at comptime" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ {
+ const str0 = &[_:0]u8{ '1', '2', '3' };
+ const slice0: [:0]const u8 = str0;
+
+ try expect(slice0.len == 3);
+ try expect(slice0[slice0.len] == 0);
+ }
+ {
+ const str0 = "123";
+ _ = &str0[0];
+ const slice0: [:0]const u8 = str0;
+
+ try expect(slice0.len == 3);
+ try expect(slice0[slice0.len] == 0);
+ }
+}
diff --git a/test/behavior/slice_stage1.zig b/test/behavior/slice_stage1.zig
deleted file mode 100644
index cb7c0f5223..0000000000
--- a/test/behavior/slice_stage1.zig
+++ /dev/null
@@ -1,347 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const expectEqualSlices = std.testing.expectEqualSlices;
-const expectEqual = std.testing.expectEqual;
-const mem = std.mem;
-
-test "result location zero sized array inside struct field implicit cast to slice" {
- const E = struct {
- entries: []u32,
- };
- var foo = E{ .entries = &[_]u32{} };
- try expect(foo.entries.len == 0);
-}
-
-const x = @intToPtr([*]i32, 0x1000)[0..0x500];
-const y = x[0x100..];
-test "compile time slice of pointer to hard coded address" {
- try expect(@ptrToInt(x) == 0x1000);
- try expect(x.len == 0x500);
-
- try expect(@ptrToInt(y) == 0x1100);
- try expect(y.len == 0x400);
-}
-
-test "runtime safety lets us slice from len..len" {
- var an_array = [_]u8{ 1, 2, 3 };
- try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
-}
-
-fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
- return a_slice[start..end];
-}
-
-test "C pointer" {
- var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
- var len: u32 = 10;
- var slice = buf[0..len];
- try expectEqualSlices(u8, "kjdhfkjdhf", slice);
-}
-
-test "C pointer slice access" {
- var buf: [10]u32 = [1]u32{42} ** 10;
- const c_ptr = @ptrCast([*c]const u32, &buf);
-
- var runtime_zero: usize = 0;
- comptime try expectEqual([]const u32, @TypeOf(c_ptr[runtime_zero..1]));
- comptime try expectEqual(*const [1]u32, @TypeOf(c_ptr[0..1]));
-
- for (c_ptr[0..5]) |*cl| {
- try expectEqual(@as(u32, 42), cl.*);
- }
-}
-
-fn sliceSum(comptime q: []const u8) i32 {
- comptime var result = 0;
- inline for (q) |item| {
- result += item;
- }
- return result;
-}
-
-test "comptime slices are disambiguated" {
- try expect(sliceSum(&[_]u8{ 1, 2 }) == 3);
- try expect(sliceSum(&[_]u8{ 3, 4 }) == 7);
-}
-
-test "slice type with custom alignment" {
- const LazilyResolvedType = struct {
- anything: i32,
- };
- var slice: []align(32) LazilyResolvedType = undefined;
- var array: [10]LazilyResolvedType align(32) = undefined;
- slice = &array;
- slice[1].anything = 42;
- try expect(array[1].anything == 42);
-}
-
-test "obtaining a null terminated slice" {
- // here we have a normal array
- var buf: [50]u8 = undefined;
-
- buf[0] = 'a';
- buf[1] = 'b';
- buf[2] = 'c';
- buf[3] = 0;
-
- // now we obtain a null terminated slice:
- const ptr = buf[0..3 :0];
- _ = ptr;
-
- var runtime_len: usize = 3;
- const ptr2 = buf[0..runtime_len :0];
- // ptr2 is a null-terminated slice
- comptime try expect(@TypeOf(ptr2) == [:0]u8);
- comptime try expect(@TypeOf(ptr2[0..2]) == *[2]u8);
- var runtime_zero: usize = 0;
- comptime try expect(@TypeOf(ptr2[runtime_zero..2]) == []u8);
-}
-
-test "empty array to slice" {
- const S = struct {
- fn doTheTest() !void {
- const empty: []align(16) u8 = &[_]u8{};
- const align_1: []align(1) u8 = empty;
- const align_4: []align(4) u8 = empty;
- const align_16: []align(16) u8 = empty;
- try expectEqual(1, @typeInfo(@TypeOf(align_1)).Pointer.alignment);
- try expectEqual(4, @typeInfo(@TypeOf(align_4)).Pointer.alignment);
- try expectEqual(16, @typeInfo(@TypeOf(align_16)).Pointer.alignment);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "@ptrCast slice to pointer" {
- const S = struct {
- fn doTheTest() !void {
- var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff };
- var slice: []u8 = &array;
- var ptr = @ptrCast(*u16, slice);
- try expect(ptr.* == 65535);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "slice syntax resulting in pointer-to-array" {
- const S = struct {
- fn doTheTest() !void {
- try testArray();
- try testArrayZ();
- try testArray0();
- try testArrayAlign();
- try testPointer();
- try testPointerZ();
- try testPointer0();
- try testPointerAlign();
- try testSlice();
- try testSliceZ();
- try testSlice0();
- try testSliceOpt();
- try testSliceAlign();
- }
-
- fn testArray() !void {
- var array = [5]u8{ 1, 2, 3, 4, 5 };
- var slice = array[1..3];
- comptime try expect(@TypeOf(slice) == *[2]u8);
- try expect(slice[0] == 2);
- try expect(slice[1] == 3);
- }
-
- fn testArrayZ() !void {
- var array = [5:0]u8{ 1, 2, 3, 4, 5 };
- comptime try expect(@TypeOf(array[1..3]) == *[2]u8);
- comptime try expect(@TypeOf(array[1..5]) == *[4:0]u8);
- comptime try expect(@TypeOf(array[1..]) == *[4:0]u8);
- comptime try expect(@TypeOf(array[1..3 :4]) == *[2:4]u8);
- }
-
- fn testArray0() !void {
- {
- var array = [0]u8{};
- var slice = array[0..0];
- comptime try expect(@TypeOf(slice) == *[0]u8);
- }
- {
- var array = [0:0]u8{};
- var slice = array[0..0];
- comptime try expect(@TypeOf(slice) == *[0:0]u8);
- try expect(slice[0] == 0);
- }
- }
-
- fn testArrayAlign() !void {
- var array align(4) = [5]u8{ 1, 2, 3, 4, 5 };
- var slice = array[4..5];
- comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
- try expect(slice[0] == 5);
- comptime try expect(@TypeOf(array[0..2]) == *align(4) [2]u8);
- }
-
- fn testPointer() !void {
- var array = [5]u8{ 1, 2, 3, 4, 5 };
- var pointer: [*]u8 = &array;
- var slice = pointer[1..3];
- comptime try expect(@TypeOf(slice) == *[2]u8);
- try expect(slice[0] == 2);
- try expect(slice[1] == 3);
- }
-
- fn testPointerZ() !void {
- var array = [5:0]u8{ 1, 2, 3, 4, 5 };
- var pointer: [*:0]u8 = &array;
- comptime try expect(@TypeOf(pointer[1..3]) == *[2]u8);
- comptime try expect(@TypeOf(pointer[1..3 :4]) == *[2:4]u8);
- }
-
- fn testPointer0() !void {
- var pointer: [*]const u0 = &[1]u0{0};
- var slice = pointer[0..1];
- comptime try expect(@TypeOf(slice) == *const [1]u0);
- try expect(slice[0] == 0);
- }
-
- fn testPointerAlign() !void {
- var array align(4) = [5]u8{ 1, 2, 3, 4, 5 };
- var pointer: [*]align(4) u8 = &array;
- var slice = pointer[4..5];
- comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
- try expect(slice[0] == 5);
- comptime try expect(@TypeOf(pointer[0..2]) == *align(4) [2]u8);
- }
-
- fn testSlice() !void {
- var array = [5]u8{ 1, 2, 3, 4, 5 };
- var src_slice: []u8 = &array;
- var slice = src_slice[1..3];
- comptime try expect(@TypeOf(slice) == *[2]u8);
- try expect(slice[0] == 2);
- try expect(slice[1] == 3);
- }
-
- fn testSliceZ() !void {
- var array = [5:0]u8{ 1, 2, 3, 4, 5 };
- var slice: [:0]u8 = &array;
- comptime try expect(@TypeOf(slice[1..3]) == *[2]u8);
- comptime try expect(@TypeOf(slice[1..]) == [:0]u8);
- comptime try expect(@TypeOf(slice[1..3 :4]) == *[2:4]u8);
- }
-
- fn testSliceOpt() !void {
- var array: [2]u8 = [2]u8{ 1, 2 };
- var slice: ?[]u8 = &array;
- comptime try expect(@TypeOf(&array, slice) == ?[]u8);
- comptime try expect(@TypeOf(slice.?[0..2]) == *[2]u8);
- }
-
- fn testSlice0() !void {
- {
- var array = [0]u8{};
- var src_slice: []u8 = &array;
- var slice = src_slice[0..0];
- comptime try expect(@TypeOf(slice) == *[0]u8);
- }
- {
- var array = [0:0]u8{};
- var src_slice: [:0]u8 = &array;
- var slice = src_slice[0..0];
- comptime try expect(@TypeOf(slice) == *[0]u8);
- }
- }
-
- fn testSliceAlign() !void {
- var array align(4) = [5]u8{ 1, 2, 3, 4, 5 };
- var src_slice: []align(4) u8 = &array;
- var slice = src_slice[4..5];
- comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
- try expect(slice[0] == 5);
- comptime try expect(@TypeOf(src_slice[0..2]) == *align(4) [2]u8);
- }
-
- fn testConcatStrLiterals() !void {
- try expectEqualSlices("a"[0..] ++ "b"[0..], "ab");
- try expectEqualSlices("a"[0.. :0] ++ "b"[0.. :0], "ab");
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "type coercion of pointer to anon struct literal to pointer to slice" {
- const S = struct {
- const U = union {
- a: u32,
- b: bool,
- c: []const u8,
- };
-
- fn doTheTest() !void {
- var x1: u8 = 42;
- const t1 = &.{ x1, 56, 54 };
- var slice1: []const u8 = t1;
- try expect(slice1.len == 3);
- try expect(slice1[0] == 42);
- try expect(slice1[1] == 56);
- try expect(slice1[2] == 54);
-
- var x2: []const u8 = "hello";
- const t2 = &.{ x2, ", ", "world!" };
- // @compileLog(@TypeOf(t2));
- var slice2: []const []const u8 = t2;
- try expect(slice2.len == 3);
- try expect(mem.eql(u8, slice2[0], "hello"));
- try expect(mem.eql(u8, slice2[1], ", "));
- try expect(mem.eql(u8, slice2[2], "world!"));
- }
- };
- // try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "array concat of slices gives slice" {
- comptime {
- var a: []const u8 = "aoeu";
- var b: []const u8 = "asdf";
- const c = a ++ b;
- try expect(std.mem.eql(u8, c, "aoeuasdf"));
- }
-}
-
-test "slice bounds in comptime concatenation" {
- const bs = comptime blk: {
- const b = "........1........";
- break :blk b[8..9];
- };
- const str = "" ++ bs;
- try expect(str.len == 1);
- try expect(std.mem.eql(u8, str, "1"));
-
- const str2 = bs ++ "";
- try expect(str2.len == 1);
- try expect(std.mem.eql(u8, str2, "1"));
-}
-
-test "slice sentinel access at comptime" {
- {
- const str0 = &[_:0]u8{ '1', '2', '3' };
- const slice0: [:0]const u8 = str0;
-
- try expect(slice0.len == 3);
- try expect(slice0[slice0.len] == 0);
- }
- {
- const str0 = "123";
- _ = &str0[0];
- const slice0: [:0]const u8 = str0;
-
- try expect(slice0.len == 3);
- try expect(slice0[slice0.len] == 0);
- }
-}
From 71321b694195a87ab7394a25badf5295eb01875e Mon Sep 17 00:00:00 2001
From: Kirk Scheibelhut
Date: Fri, 4 Feb 2022 11:27:50 -0800
Subject: [PATCH 0028/2031] Various documentation fixes
Co-authored-by: Kirk Scheibelhut
Co-authored-by: extrasharp
---
doc/langref.html.in | 190 +++++++++++++++++++++++++++-----------------
1 file changed, 119 insertions(+), 71 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 955d17f253..fd104db6da 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -1295,13 +1295,39 @@ test "expectError demo" {
A variable is a unit of {#link|Memory#} storage.
- Variables are never allowed to shadow identifiers from an outer scope.
-
-
It is generally preferable to use {#syntax#}const{#endsyntax#} rather than
{#syntax#}var{#endsyntax#} when declaring a variable. This causes less work for both
humans and computers to do when reading code, and creates more optimization opportunities.
+
+ {#header_open|Identifiers#}
+
+ Variable identifiers are never allowed to shadow identifiers from an outer scope.
+
+
+ Identifiers must start with an alphabetic character or underscore and may be followed
+ by any number of alphanumeric characters or underscores.
+ They must not overlap with any keywords. See {#link|Keyword Reference#}.
+
+
+ If a name that does not fit these requirements is needed, such as for linking with external libraries, the {#syntax#}@""{#endsyntax#} syntax may be used.
+
Container level variables have static lifetime and are order-independent and lazily analyzed.
@@ -1486,7 +1512,7 @@ fn divide(a: i32, b: i32) i32 {
Operators such as {#syntax#}+{#endsyntax#} and {#syntax#}-{#endsyntax#} cause undefined behavior on
- integer overflow. Alternative operators are provided for wrapping and saturating arithmetic on all targets.
+ integer overflow. Alternative operators are provided for wrapping and saturating arithmetic on all targets.
{#syntax#}+%{#endsyntax#} and {#syntax#}-%{#endsyntax#} perform wrapping arithmetic
while {#syntax#}+|{#endsyntax#} and {#syntax#}-|{#endsyntax#} perform saturating arithmetic.
@@ -2494,32 +2520,32 @@ test "null terminated array" {
or using the shorthand function {#syntax#}std.meta.Vector{#endsyntax#}.
- Vectors support the same builtin operators as their underlying base types. These operations are performed
+ Vectors support the same builtin operators as their underlying base types. These operations are performed
element-wise, and return a vector of the same length as the input vectors. This includes:
- It is prohibited to use a math operator on a mixture of scalars (individual numbers) and vectors.
- Zig provides the {#link|@splat#} builtin to easily convert from scalars to vectors, and it supports {#link|@reduce#}
- and array indexing syntax to convert from vectors to scalars. Vectors also support assignment to and from
+ It is prohibited to use a math operator on a mixture of scalars (individual numbers) and vectors.
+ Zig provides the {#link|@splat#} builtin to easily convert from scalars to vectors, and it supports {#link|@reduce#}
+ and array indexing syntax to convert from vectors to scalars. Vectors also support assignment to and from
fixed-length arrays with comptime known length.
For rearranging elements within and between vectors, Zig provides the {#link|@shuffle#} and {#link|@select#} functions.
- Operations on vectors shorter than the target machine's native SIMD size will typically compile to single SIMD
- instructions, while vectors longer than the target machine's native SIMD size will compile to multiple SIMD
- instructions. If a given operation doesn't have SIMD support on the target architecture, the compiler will default
- to operating on each vector element one at a time. Zig supports any comptime-known vector length up to 2^32-1,
- although small powers of two (2-64) are most typical. Note that excessively long vector lengths (e.g. 2^20) may
+ Operations on vectors shorter than the target machine's native SIMD size will typically compile to single SIMD
+ instructions, while vectors longer than the target machine's native SIMD size will compile to multiple SIMD
+ instructions. If a given operation doesn't have SIMD support on the target architecture, the compiler will default
+ to operating on each vector element one at a time. Zig supports any comptime-known vector length up to 2^32-1,
+ although small powers of two (2-64) are most typical. Note that excessively long vector lengths (e.g. 2^20) may
result in compiler crashes on current versions of Zig.
{#code_begin|test|vector_example#}
@@ -2569,7 +2595,7 @@ test "Conversion between vectors, arrays, and slices" {
TODO consider suggesting std.MultiArrayList
{#see_also|@splat|@shuffle|@select|@reduce#}
-
+
{#header_close#}
{#header_open|Pointers#}
@@ -2987,8 +3013,8 @@ test "null terminated slice" {
}
{#code_end#}
- Sentinel-terminated slices can also be created using a variation of the slice syntax
- {#syntax#}data[start..end :x]{#endsyntax#}, where {#syntax#}data{#endsyntax#} is a many-item pointer,
+ Sentinel-terminated slices can also be created using a variation of the slice syntax
+ {#syntax#}data[start..end :x]{#endsyntax#}, where {#syntax#}data{#endsyntax#} is a many-item pointer,
array or slice and {#syntax#}x{#endsyntax#} is the sentinel value.
- Sentinel-terminated slicing asserts that the element in the sentinel position of the backing data is
+ Sentinel-terminated slicing asserts that the element in the sentinel position of the backing data is
actually the sentinel value. If this is not the case, safety-protected {#link|Undefined Behavior#} results.
{#code_begin|test_safety|sentinel mismatch#}
@@ -3014,10 +3040,10 @@ const expect = std.testing.expect;
test "sentinel mismatch" {
var array = [_]u8{ 3, 2, 1, 0 };
-
- // Creating a sentinel-terminated slice from the array with a length of 2
- // will result in the value `1` occupying the sentinel element position.
- // This does not match the indicated sentinel value of `0` and will lead
+
+ // Creating a sentinel-terminated slice from the array with a length of 2
+ // will result in the value `1` occupying the sentinel element position.
+ // This does not match the indicated sentinel value of `0` and will lead
// to a runtime panic.
var runtime_length: usize = 2;
const slice = array[0..runtime_length :0];
@@ -3165,7 +3191,7 @@ test "linked list" {
.last = &node,
.len = 1,
};
-
+
// When using a pointer to a struct, fields can be accessed directly,
// without explicitly dereferencing the pointer.
// So you can do
@@ -3497,7 +3523,7 @@ fn dump(args: anytype) !void {
The fields are implicitly named using numbers starting from 0. Because their names are integers,
- the {#syntax#}@"0"{#endsyntax#} syntax must be used to access them. Names inside {#syntax#}@""{#endsyntax#} are always recognised as identifiers.
+ the {#syntax#}@"0"{#endsyntax#} syntax must be used to access them. Names inside {#syntax#}@""{#endsyntax#} are always recognised as {#link|identifiers|Identifiers#}.
Like arrays, tuples have a .len field, can be indexed and work with the ++ and ** operators. They can also be iterated over with {#link|inline for#}.
@@ -3986,7 +4012,7 @@ test "labeled break from labeled block expression" {
{#see_also|Labeled while|Labeled for#}
{#header_open|Shadowing#}
-
Identifiers are never allowed to "hide" other identifiers by using the same name:
+
{#link|Identifiers#} are never allowed to "hide" other identifiers by using the same name:
{#code_begin|test_err|local shadows declaration#}
const pi = 3.14;
@@ -3998,8 +4024,8 @@ test "inside test block" {
}
{#code_end#}
- Because of this, when you read Zig code you can always rely on an identifier to consistently mean
- the same thing within the scope it is defined. Note that you can, however, use the same name if
+ Because of this, when you read Zig code you can always rely on an identifier to consistently mean
+ the same thing within the scope it is defined. Note that you can, however, use the same name if
the scopes are separate:
{#code_begin|test|test_scopes#}
@@ -4037,7 +4063,7 @@ test "switch simple" {
1, 2, 3 => 0,
// Ranges can be specified using the ... syntax. These are inclusive
- // both ends.
+ // of both ends.
5...100 => 1,
// Branches can be arbitrarily complex.
@@ -4809,7 +4835,7 @@ test "errdefer unwinding" {
{#header_open|Basics#}
{#code_begin|test|test_unreachable#}
-// unreachable is used to assert that control flow will never happen upon a
+// unreachable is used to assert that control flow will never reach a
// particular location:
test "basic math" {
const x = 1;
@@ -6777,8 +6803,7 @@ test "variable values" {
generic data structure.
- Here is an example of a generic {#syntax#}List{#endsyntax#} data structure, that we will instantiate with
- the type {#syntax#}i32{#endsyntax#}. In Zig we refer to the type as {#syntax#}List(i32){#endsyntax#}.
+ Here is an example of a generic {#syntax#}List{#endsyntax#} data structure.
- That's it. It's a function that returns an anonymous {#syntax#}struct{#endsyntax#}. For the purposes of error messages
- and debugging, Zig infers the name {#syntax#}"List(i32)"{#endsyntax#} from the function name and parameters invoked when creating
- the anonymous struct.
-
-
- To keep the language small and uniform, all aggregate types in Zig are anonymous. To give a type
- a name, we assign it to a constant:
-
- {#code_begin|syntax#}
-const Node = struct {
- next: *Node,
- name: []u8,
+
+// The generic List data structure can be instantiated by passing in a type:
+var buffer: [10]i32 = undefined;
+var list = List(i32){
+ .items = &buffer,
+ .len = 0,
};
{#code_end#}
- This works because all top level declarations are order-independent, and as long as there isn't
- an actual infinite regression, values can refer to themselves, directly or indirectly. In this case,
- {#syntax#}Node{#endsyntax#} refers to itself as a pointer, which is not actually an infinite regression, so
- it works fine.
+ That's it. It's a function that returns an anonymous {#syntax#}struct{#endsyntax#}.
+ To keep the language small and uniform, all aggregate types in Zig are anonymous.
+ For the purposes of error messages and debugging, Zig infers the name
+ {#syntax#}"List(i32)"{#endsyntax#} from the function name and parameters invoked when creating
+ the anonymous struct.
+
+
+ To explicitly give a type a name, we assign it to a constant.
+
+ In this example, the {#syntax#}Node{#endsyntax#} struct refers to itself.
+ This works because all top level declarations are order-independent.
+ As long as the compiler can determine the size of the struct, it is free to refer to itself.
+ In this case, {#syntax#}Node{#endsyntax#} refers to itself as a pointer, which has a
+ well-defined size at compile time, so it works fine.
{#header_close#}
{#header_open|Case Study: print in Zig#}
@@ -7220,10 +7264,10 @@ test "global assembly" {
provided explicitly by the caller, and it can be suspended and resumed any number of times.
- The code following the {#syntax#}async{#endsyntax#} callsite runs immediately after the async
- function first suspends. When the return value of the async function is needed,
- the calling code can {#syntax#}await{#endsyntax#} on the async function frame.
- This will suspend the calling code until the async function completes, at which point
+ The code following the {#syntax#}async{#endsyntax#} callsite runs immediately after the async
+ function first suspends. When the return value of the async function is needed,
+ the calling code can {#syntax#}await{#endsyntax#} on the async function frame.
+ This will suspend the calling code until the async function completes, at which point
execution resumes just after the {#syntax#}await{#endsyntax#} callsite.
@@ -7333,8 +7377,8 @@ fn testResumeFromSuspend(my_result: *i32) void {
in standard code.
- However, it is possible to have an {#syntax#}async{#endsyntax#} call
- without a matching {#syntax#}await{#endsyntax#}. Upon completion of the async function,
+ However, it is possible to have an {#syntax#}async{#endsyntax#} call
+ without a matching {#syntax#}await{#endsyntax#}. Upon completion of the async function,
execution would continue at the most recent {#syntax#}async{#endsyntax#} callsite or {#syntax#}resume{#endsyntax#} callsite,
and the return value of the async function would be lost.
@@ -7371,8 +7415,8 @@ fn func() void {
{#syntax#}await{#endsyntax#} is a suspend point, and takes as an operand anything that
- coerces to {#syntax#}anyframe->T{#endsyntax#}. Calling {#syntax#}await{#endsyntax#} on
- the frame of an async function will cause execution to continue at the
+ coerces to {#syntax#}anyframe->T{#endsyntax#}. Calling {#syntax#}await{#endsyntax#} on
+ the frame of an async function will cause execution to continue at the
{#syntax#}await{#endsyntax#} callsite once the target function completes.
Note that even when using {#syntax#}export{#endsyntax#}, {#syntax#}@"foo"{#endsyntax#} syntax can
- be used to choose any string for the symbol name:
+
Note that even when using {#syntax#}export{#endsyntax#}, the {#syntax#}@"foo"{#endsyntax#} syntax for
+ {#link|identifiers|Identifiers#} can be used to choose any string for the symbol name:
{#code_begin|obj#}
export fn @"A function name that is a complete sentence."() void {}
{#code_end#}
@@ -8597,7 +8641,9 @@ test "integer cast panic" {
{#header_open|@intToPtr#}
- Converts an integer to a {#link|pointer|Pointers#}. To convert the other way, use {#link|@ptrToInt#}.
+ Converts an integer to a {#link|pointer|Pointers#}. To convert the other way, use {#link|@ptrToInt#}. Casting an address of 0 to a destination type
+ which in not {#link|optional|Optional Pointers#} and does not have the {#syntax#}allowzero{#endsyntax#} attribute will result in a
+ {#link|Pointer Cast Invalid Null#} panic when runtime safety checks are enabled.
If the destination pointer type does not allow address zero and {#syntax#}address{#endsyntax#}
@@ -8711,7 +8757,8 @@ test "@wasmMemoryGrow" {
Modulus division. For unsigned integers this is the same as
- {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}.
+ {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}, otherwise the
+ operation will result in a {#link|Remainder Division by Zero#} when runtime safety checks are enabled.
{#syntax#}@mod(-5, 3) == 1{#endsyntax#}
@@ -8729,7 +8776,7 @@ test "@wasmMemoryGrow" {
If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
{#header_close#}
-
+
{#header_open|@panic#}
Remainder division. For unsigned integers this is the same as
- {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}.
+ {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}, otherwise the
+ operation will result in a {#link|Remainder Division by Zero#} when runtime safety checks are enabled.
Changes the maximum number of backwards branches that compile-time code
execution can use before giving up and making a compile error.
@@ -8920,7 +8968,7 @@ test "foo" {
{#header_close#}
{#header_open|@setFloatMode#}
-
From 04f379dd414184a42412f4497b0573d7612d6730 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Thu, 3 Feb 2022 20:31:01 +0100
Subject: [PATCH 0029/2031] stage2 ARM: optimize airSliceElemVal for elem_size
1 or 4
In these cases, the AIR inst can be lowered to only one ldr
instruction.
Also fixes shifts in arm.bits.Offset
---
src/arch/arm/CodeGen.zig | 160 ++++++++++++++++++++-------------------
src/arch/arm/bits.zig | 84 +++++++++++++-------
2 files changed, 139 insertions(+), 105 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index c7e80dbe24..3d334656a1 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1222,9 +1222,16 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: {
+
+ if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+ const result: MCValue = result: {
const slice_mcv = try self.resolveInst(bin_op.lhs);
+ // TODO optimize for the case where the index is a constant,
+ // i.e. index_mcv == .immediate
+ const index_mcv = try self.resolveInst(bin_op.rhs);
+ const index_is_register = index_mcv == .register;
+
const slice_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = slice_ty.childType();
const elem_size = elem_ty.abiSize(self.target.*);
@@ -1232,12 +1239,8 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
- // TODO optimize this for the case when elem_size is a power
- // of two (includes elem_size == 1)
- const offset_mcv = try self.genArmMulConstant(inst, bin_op.rhs, 1, @intCast(u32, elem_size));
- assert(offset_mcv == .register); // result of multiplication should always be register
- self.register_manager.freezeRegs(&.{offset_mcv.register});
- defer self.register_manager.unfreezeRegs(&.{offset_mcv.register});
+ if (index_is_register) self.register_manager.freezeRegs(&.{index_mcv.register});
+ defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});
const base_mcv: MCValue = switch (slice_mcv) {
.stack_offset => .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, slice_mcv) },
@@ -1246,61 +1249,67 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
self.register_manager.freezeRegs(&.{base_mcv.register});
defer self.register_manager.unfreezeRegs(&.{base_mcv.register});
- if (elem_size <= 4) {
- const dst_reg = try self.register_manager.allocReg(inst);
- self.register_manager.freezeRegs(&.{dst_reg});
- defer self.register_manager.unfreezeRegs(&.{dst_reg});
+ switch (elem_size) {
+ 1, 4 => {
+ const dst_reg = try self.register_manager.allocReg(inst);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ self.register_manager.freezeRegs(&.{dst_reg});
+ defer self.register_manager.unfreezeRegs(&.{dst_reg});
- switch (elem_size) {
- 1, 4 => {
- const tag: Mir.Inst.Tag = switch (elem_size) {
- 1 => .ldrb,
- 4 => .ldr,
- else => unreachable,
- };
+ const index_reg: Register = switch (index_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(Type.usize, index_mcv),
+ };
+ self.register_manager.freezeRegs(&.{index_reg});
+ defer self.register_manager.unfreezeRegs(&.{index_reg});
- _ = try self.addInst(.{
- .tag = tag,
- .data = .{ .rr_offset = .{
- .rt = dst_reg,
- .rn = base_mcv.register,
- .offset = .{ .offset = Instruction.Offset.reg(offset_mcv.register, 0) },
- } },
- });
- },
- 2 => {
- _ = try self.addInst(.{
- .tag = .ldrh,
- .data = .{ .rr_extra_offset = .{
- .rt = dst_reg,
- .rn = base_mcv.register,
- .offset = .{ .offset = Instruction.ExtraLoadStoreOffset.reg(offset_mcv.register) },
- } },
- });
- },
- else => unreachable,
- }
+ const tag: Mir.Inst.Tag = switch (elem_size) {
+ 1 => .ldrb,
+ 4 => .ldr,
+ else => unreachable,
+ };
+ const shift: u5 = switch (elem_size) {
+ 1 => 0,
+ 4 => 2,
+ else => unreachable,
+ };
- break :result MCValue{ .register = dst_reg };
- } else {
- const dst_mcv = try self.allocRegOrMem(inst, false);
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .rr_offset = .{
+ .rt = dst_reg,
+ .rn = base_mcv.register,
+ .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) },
+ } },
+ });
- const addr_reg = try self.register_manager.allocReg(null);
- self.register_manager.freezeRegs(&.{addr_reg});
- defer self.register_manager.unfreezeRegs(&.{addr_reg});
+ break :result dst_mcv;
+ },
+ else => {
+ const dst_mcv = try self.allocRegOrMem(inst, true);
- try self.genArmBinOpCode(addr_reg, base_mcv, offset_mcv, false, .add, .unsigned);
+ const offset_mcv = try self.genArmMulConstant(bin_op.rhs, @intCast(u32, elem_size));
+ assert(offset_mcv == .register); // result of multiplication should always be register
+ self.register_manager.freezeRegs(&.{offset_mcv.register});
+ defer self.register_manager.unfreezeRegs(&.{offset_mcv.register});
- // I know we will unfreeze these registers at the end of
- // the scope of :result. However, at this point in time,
- // neither the base register nor the offset register
- // contains any valuable data anymore. In order to reduce
- // register pressure, unfreeze them prematurely
- self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
+ const addr_reg = try self.register_manager.allocReg(null);
+ self.register_manager.freezeRegs(&.{addr_reg});
+ defer self.register_manager.unfreezeRegs(&.{addr_reg});
- try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
+ try self.genArmBinOpCode(addr_reg, base_mcv, offset_mcv, false, .add, .unsigned);
- break :result dst_mcv;
+ // I know we will unfreeze these registers at the end of
+ // the scope of :result. However, at this point in time,
+ // neither the base register nor the offset register
+ // contains any valuable data anymore. In order to reduce
+ // register pressure, unfreeze them prematurely
+ self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
+
+ try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
+
+ break :result dst_mcv;
+ },
}
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1931,8 +1940,8 @@ fn genArmBinOpCode(
.shl, .shr => {
assert(!swap_lhs_and_rhs);
const shift_amount = switch (operand) {
- .Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
- .Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
+ .register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
+ .immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
};
const tag: Mir.Inst.Tag = switch (op) {
@@ -2036,12 +2045,11 @@ fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Ai
return dst_mcv;
}
-fn genArmMulConstant(self: *Self, inst: Air.Inst.Index, op: Air.Inst.Ref, op_index: Liveness.OperandInt, imm: u32) !MCValue {
+fn genArmMulConstant(self: *Self, op: Air.Inst.Ref, imm: u32) !MCValue {
const lhs = try self.resolveInst(op);
const rhs = MCValue{ .immediate = imm };
const lhs_is_register = lhs == .register;
- const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op, op_index, lhs);
if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
@@ -2054,23 +2062,17 @@ fn genArmMulConstant(self: *Self, inst: Air.Inst.Index, op: Air.Inst.Ref, op_ind
var rhs_mcv: MCValue = rhs;
// Allocate registers for operands and/or destination
- if (reuse_lhs) {
- // Allocate 1 register
- rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(null) };
- dst_mcv = lhs;
+ // Allocate 1 or 2 registers
+ if (lhs_is_register) {
+ // Move RHS to register
+ dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null) };
+ rhs_mcv = dst_mcv;
} else {
- // Allocate 1 or 2 registers
- if (lhs_is_register) {
- // Move RHS to register
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null) };
- rhs_mcv = dst_mcv;
- } else {
- // Move LHS and RHS to register
- const regs = try self.register_manager.allocRegs(2, .{ null, null });
- lhs_mcv = MCValue{ .register = regs[0] };
- rhs_mcv = MCValue{ .register = regs[1] };
- dst_mcv = lhs_mcv;
- }
+ // Move LHS and RHS to register
+ const regs = try self.register_manager.allocRegs(2, .{ null, null });
+ lhs_mcv = MCValue{ .register = regs[0] };
+ rhs_mcv = MCValue{ .register = regs[1] };
+ dst_mcv = lhs_mcv;
}
// Move the operands to the newly allocated registers
@@ -2132,7 +2134,7 @@ fn genArmInlineMemcpy(
.data = .{ .rr_offset = .{
.rt = tmp,
.rn = src,
- .offset = .{ .offset = Instruction.Offset.reg(count, 0) },
+ .offset = .{ .offset = Instruction.Offset.reg(count, .none) },
} },
});
@@ -2142,7 +2144,7 @@ fn genArmInlineMemcpy(
.data = .{ .rr_offset = .{
.rt = tmp,
.rn = dst,
- .offset = .{ .offset = Instruction.Offset.reg(count, 0) },
+ .offset = .{ .offset = Instruction.Offset.reg(count, .none) },
} },
});
@@ -3126,7 +3128,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
1, 4 => {
const offset = if (math.cast(u12, adj_off)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
- } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
+ } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@@ -3450,7 +3452,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
1, 4 => {
const offset = if (adj_off <= math.maxInt(u12)) blk: {
break :blk Instruction.Offset.imm(@intCast(u12, adj_off));
- } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
+ } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .ldrb,
@@ -3536,7 +3538,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
1, 4 => {
const offset = if (math.cast(u12, adj_off)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
- } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
+ } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index 279ce58005..792bf0dc05 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -343,11 +343,11 @@ pub const Instruction = union(enum) {
/// which can either be content from a register or an immediate
/// value
pub const Operand = union(enum) {
- Register: packed struct {
+ register: packed struct {
rm: u4,
shift: u8,
},
- Immediate: packed struct {
+ immediate: packed struct {
imm: u8,
rotate: u4,
},
@@ -356,12 +356,12 @@ pub const Instruction = union(enum) {
/// register can be shifted by a specific immediate value or
/// by the contents of another register
pub const Shift = union(enum) {
- Immediate: packed struct {
+ immediate: packed struct {
fixed: u1 = 0b0,
typ: u2,
amount: u5,
},
- Register: packed struct {
+ register: packed struct {
fixed_1: u1 = 0b1,
typ: u2,
fixed_2: u1 = 0b0,
@@ -376,7 +376,7 @@ pub const Instruction = union(enum) {
};
pub const none = Shift{
- .Immediate = .{
+ .immediate = .{
.amount = 0,
.typ = 0,
},
@@ -384,14 +384,14 @@ pub const Instruction = union(enum) {
pub fn toU8(self: Shift) u8 {
return switch (self) {
- .Register => |v| @bitCast(u8, v),
- .Immediate => |v| @bitCast(u8, v),
+ .register => |v| @bitCast(u8, v),
+ .immediate => |v| @bitCast(u8, v),
};
}
pub fn reg(rs: Register, typ: Type) Shift {
return Shift{
- .Register = .{
+ .register = .{
.rs = rs.id(),
.typ = @enumToInt(typ),
},
@@ -400,7 +400,7 @@ pub const Instruction = union(enum) {
pub fn imm(amount: u5, typ: Type) Shift {
return Shift{
- .Immediate = .{
+ .immediate = .{
.amount = amount,
.typ = @enumToInt(typ),
},
@@ -410,14 +410,14 @@ pub const Instruction = union(enum) {
pub fn toU12(self: Operand) u12 {
return switch (self) {
- .Register => |v| @bitCast(u12, v),
- .Immediate => |v| @bitCast(u12, v),
+ .register => |v| @bitCast(u12, v),
+ .immediate => |v| @bitCast(u12, v),
};
}
pub fn reg(rm: Register, shift: Shift) Operand {
return Operand{
- .Register = .{
+ .register = .{
.rm = rm.id(),
.shift = shift.toU8(),
},
@@ -426,7 +426,7 @@ pub const Instruction = union(enum) {
pub fn imm(immediate: u8, rotate: u4) Operand {
return Operand{
- .Immediate = .{
+ .immediate = .{
.imm = immediate,
.rotate = rotate,
},
@@ -447,7 +447,7 @@ pub const Instruction = union(enum) {
return for (masks) |mask, i| {
if (x & mask == x) {
break Operand{
- .Immediate = .{
+ .immediate = .{
.imm = @intCast(u8, std.math.rotl(u32, x, 2 * i)),
.rotate = @intCast(u4, i),
},
@@ -461,35 +461,67 @@ pub const Instruction = union(enum) {
/// instruction. Data can be loaded from memory with either an
/// immediate offset or an offset that is stored in some register.
pub const Offset = union(enum) {
- Immediate: u12,
- Register: packed struct {
+ immediate: u12,
+ register: packed struct {
rm: u4,
- shift: u8,
+ fixed: u1 = 0b0,
+ stype: u2,
+ imm5: u5,
},
+ pub const Shift = union(enum) {
+ /// No shift
+ none,
+ /// Logical shift left
+ lsl: u5,
+ /// Logical shift right
+ lsr: u5,
+ /// Arithmetic shift right
+ asr: u5,
+ /// Rotate right
+ ror: u5,
+ /// Rotate right one bit, with extend
+ rrx,
+ };
+
pub const none = Offset{
- .Immediate = 0,
+ .immediate = 0,
};
pub fn toU12(self: Offset) u12 {
return switch (self) {
- .Register => |v| @bitCast(u12, v),
- .Immediate => |v| v,
+ .register => |v| @bitCast(u12, v),
+ .immediate => |v| v,
};
}
- pub fn reg(rm: Register, shift: u8) Offset {
+ pub fn reg(rm: Register, shift: Shift) Offset {
return Offset{
- .Register = .{
+ .register = .{
.rm = rm.id(),
- .shift = shift,
+ .stype = switch (shift) {
+ .none => 0b00,
+ .lsl => 0b00,
+ .lsr => 0b01,
+ .asr => 0b10,
+ .ror => 0b11,
+ .rrx => 0b11,
+ },
+ .imm5 = switch (shift) {
+ .none => 0,
+ .lsl => |n| n,
+ .lsr => |n| n,
+ .asr => |n| n,
+ .ror => |n| n,
+ .rrx => 0,
+ },
},
};
}
pub fn imm(immediate: u12) Offset {
return Offset{
- .Immediate = immediate,
+ .immediate = immediate,
};
}
};
@@ -567,7 +599,7 @@ pub const Instruction = union(enum) {
return Instruction{
.data_processing = .{
.cond = @enumToInt(cond),
- .i = @boolToInt(op2 == .Immediate),
+ .i = @boolToInt(op2 == .immediate),
.opcode = @enumToInt(opcode),
.s = s,
.rn = rn.id(),
@@ -681,7 +713,7 @@ pub const Instruction = union(enum) {
.byte_word = byte_word,
.up_down = @boolToInt(positive),
.pre_post = @boolToInt(pre_index),
- .imm = @boolToInt(offset != .Immediate),
+ .imm = @boolToInt(offset != .immediate),
},
};
}
From 5c4ef1a64ca71c2a43e362a5ad29a10bd880716c Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Thu, 20 Jan 2022 13:50:30 +0200
Subject: [PATCH 0030/2031] compiler-rt: add extend functions for f80
---
lib/std/math.zig | 2 +-
lib/std/special/compiler_rt.zig | 12 +-
lib/std/special/compiler_rt/extendXfYf2.zig | 5 -
lib/std/special/compiler_rt/extend_f80.zig | 131 ++++++++++++++++++++
4 files changed, 141 insertions(+), 9 deletions(-)
create mode 100644 lib/std/special/compiler_rt/extend_f80.zig
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 59532d7ab2..43ad49889d 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -43,7 +43,7 @@ pub const f128_max = @bitCast(f128, @as(u128, 0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF
pub const f128_epsilon = @bitCast(f128, @as(u128, 0x3F8F0000000000000000000000000000));
pub const f128_toint = 1.0 / f128_epsilon;
-const F80Repr = if (@import("builtin").cpu.arch.endian() == .Little) extern struct {
+pub const F80Repr = if (@import("builtin").cpu.arch.endian() == .Little) extern struct {
fraction: u64,
exp: u16,
} else extern struct {
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index 24bca128de..acb0d13baf 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -39,6 +39,15 @@ comptime {
const __extendhftf2 = @import("compiler_rt/extendXfYf2.zig").__extendhftf2;
@export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = linkage });
+ const __extendhfxf2 = @import("compiler_rt/extend_f80.zig").__extendhfxf2;
+ @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = linkage });
+ const __extendffxf2 = @import("compiler_rt/extend_f80.zig").__extendffxf2;
+ @export(__extendffxf2, .{ .name = "__extendffxf2", .linkage = linkage });
+ const __extenddfxf2 = @import("compiler_rt/extend_f80.zig").__extenddfxf2;
+ @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = linkage });
+ const __extendxftf2 = @import("compiler_rt/extend_f80.zig").__extendxftf2;
+ @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = linkage });
+
const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2;
@export(__lesf2, .{ .name = "__lesf2", .linkage = linkage });
const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2;
@@ -181,9 +190,6 @@ comptime {
if (!long_double_is_f128) {
// TODO implement these
- //const __extendxftf2 = @import("compiler_rt/extendXfYf2.zig").__extendxftf2;
- //@export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = linkage });
-
//const __trunctfxf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfxf2;
//@export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = linkage });
}
diff --git a/lib/std/special/compiler_rt/extendXfYf2.zig b/lib/std/special/compiler_rt/extendXfYf2.zig
index 2c3f0c88fc..8622fe1513 100644
--- a/lib/std/special/compiler_rt/extendXfYf2.zig
+++ b/lib/std/special/compiler_rt/extendXfYf2.zig
@@ -27,11 +27,6 @@ pub fn __extendhftf2(a: F16T) callconv(.C) f128 {
return extendXfYf2(f128, f16, @bitCast(u16, a));
}
-pub fn __extendxftf2(a: c_longdouble) callconv(.C) f128 {
- _ = a;
- @panic("TODO implement");
-}
-
pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, arg });
diff --git a/lib/std/special/compiler_rt/extend_f80.zig b/lib/std/special/compiler_rt/extend_f80.zig
new file mode 100644
index 0000000000..29ba8560ce
--- /dev/null
+++ b/lib/std/special/compiler_rt/extend_f80.zig
@@ -0,0 +1,131 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const native_arch = builtin.cpu.arch;
+
+// AArch64 is the only ABI (at the moment) to support f16 arguments without the
+// need for extending them to wider fp types.
+pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
+
+pub fn __extendhfxf2(a: F16T) callconv(.C) f80 {
+ return extendF80(f16, @bitCast(u16, a));
+}
+
+pub fn __extendffxf2(a: f32) callconv(.C) f80 {
+ return extendF80(f32, @bitCast(u32, a));
+}
+
+pub fn __extenddfxf2(a: f64) callconv(.C) f80 {
+ return extendF80(f64, @bitCast(u64, a));
+}
+
+inline fn extendF80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
+ const src_sig_bits = std.math.floatMantissaBits(src_t);
+ const dst_int_bit = 0x8000000000000000;
+ const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+
+ const dst_exp_bias = 16383;
+
+ const src_bits = @bitSizeOf(src_t);
+ const src_exp_bits = src_bits - src_sig_bits - 1;
+ const src_inf_exp = (1 << src_exp_bits) - 1;
+ const src_exp_bias = src_inf_exp >> 1;
+
+ const src_min_normal = 1 << src_sig_bits;
+ const src_inf = src_inf_exp << src_sig_bits;
+ const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
+ const src_abs_mask = src_sign_mask - 1;
+ const src_qnan = 1 << (src_sig_bits - 1);
+ const src_nan_code = src_qnan - 1;
+
+ var dst: std.math.F80Repr align(16) = undefined;
+
+ // Break a into a sign and representation of the absolute value
+ const a_abs = a & src_abs_mask;
+ const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
+
+ if (a_abs -% src_min_normal < src_inf - src_min_normal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ dst.exp = @intCast(u16, a_abs >> src_sig_bits);
+ dst.exp += dst_exp_bias - src_exp_bias;
+ dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ } else if (a_abs >= src_inf) {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ dst.exp = 0x7fff;
+ dst.fraction = dst_int_bit;
+ dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
+ } else if (a_abs != 0) {
+ // a is denormal.
+ // renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ const scale: u16 = @clz(src_rep_t, a_abs) -
+ @clz(src_rep_t, @as(src_rep_t, src_min_normal));
+
+ dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
+ dst.exp ^= 1;
+ dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
+ } else {
+ // a is zero.
+ dst.exp = 0;
+ dst.fraction = 0;
+ }
+
+ dst.exp |= sign;
+ return @ptrCast(*const f80, &dst).*;
+}
+
+pub fn __extendxftf2(a: f80) callconv(.C) f128 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const src_int_bit: u64 = 0x8000000000000000;
+ const src_sig_mask = ~src_int_bit;
+ const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+ const dst_sig_bits = std.math.floatMantissaBits(f128);
+
+ const dst_bits = @bitSizeOf(f128);
+
+ const dst_min_normal = @as(u128, 1) << dst_sig_bits;
+
+ // Break a into a sign and representation of the absolute value
+ var a_rep = @ptrCast(*const std.math.F80Repr, &a).*;
+ const sign = a_rep.exp & 0x8000;
+ a_rep.exp &= 0x7FFF;
+ var abs_result: u128 = undefined;
+
+ if (a_rep.exp == 0 and a_rep.fraction == 0) {
+ // zero
+ abs_result = 0;
+ } else if (a_rep.exp == 0x7FFF) {
+ // a is nan or infinite
+ abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else if (a_rep.fraction & src_int_bit != 0) {
+ // a is a normal value
+ abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else {
+ // a is denormal
+ // renormalize the significand and clear the leading bit and integer part,
+ // then insert the correct adjusted exponent in the destination type.
+ const scale: u32 = @clz(u64, a_rep.fraction);
+ abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
+ abs_result ^= dst_min_normal;
+ abs_result |= @as(u128, scale + 1) << dst_sig_bits;
+ }
+
+ // Apply the signbit to (dst_t)abs(a).
+ const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
+ return @bitCast(f128, result);
+}
From 72cef17b1a23c4704b3931540b7f10f4297870b9 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Fri, 21 Jan 2022 12:41:09 +0200
Subject: [PATCH 0031/2031] compiler-rt: add trunc functions for f80
---
lib/std/special/compiler_rt.zig | 13 +-
lib/std/special/compiler_rt/truncXfYf2.zig | 5 -
lib/std/special/compiler_rt/trunc_f80.zig | 159 +++++++++++++++++++++
3 files changed, 167 insertions(+), 10 deletions(-)
create mode 100644 lib/std/special/compiler_rt/trunc_f80.zig
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index acb0d13baf..555a7a49d3 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -188,11 +188,14 @@ comptime {
const __truncdfsf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfsf2;
@export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage });
- if (!long_double_is_f128) {
- // TODO implement these
- //const __trunctfxf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfxf2;
- //@export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = linkage });
- }
+ const __truncxfhf2 = @import("compiler_rt/trunc_f80.zig").__truncxfhf2;
+ @export(__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = linkage });
+ const __truncxfff2 = @import("compiler_rt/trunc_f80.zig").__truncxfff2;
+ @export(__truncxfff2, .{ .name = "__truncxfff2", .linkage = linkage });
+ const __truncxfdf2 = @import("compiler_rt/trunc_f80.zig").__truncxfdf2;
+ @export(__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = linkage });
+ const __trunctfxf2 = @import("compiler_rt/trunc_f80.zig").__trunctfxf2;
+ @export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = linkage });
if (builtin.zig_backend == .stage1) {
switch (arch) {
diff --git a/lib/std/special/compiler_rt/truncXfYf2.zig b/lib/std/special/compiler_rt/truncXfYf2.zig
index 4cded15abc..fea1aeb60a 100644
--- a/lib/std/special/compiler_rt/truncXfYf2.zig
+++ b/lib/std/special/compiler_rt/truncXfYf2.zig
@@ -26,11 +26,6 @@ pub fn __trunctfdf2(a: f128) callconv(.C) f64 {
return truncXfYf2(f64, f128, a);
}
-pub fn __trunctfxf2(a: f128) callconv(.C) c_longdouble {
- _ = a;
- @panic("TODO implement");
-}
-
pub fn __truncdfsf2(a: f64) callconv(.C) f32 {
return truncXfYf2(f32, f64, a);
}
diff --git a/lib/std/special/compiler_rt/trunc_f80.zig b/lib/std/special/compiler_rt/trunc_f80.zig
new file mode 100644
index 0000000000..567d03be63
--- /dev/null
+++ b/lib/std/special/compiler_rt/trunc_f80.zig
@@ -0,0 +1,159 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const native_arch = builtin.cpu.arch;
+
+// AArch64 is the only ABI (at the moment) to support f16 arguments without the
+// need for extending them to wider fp types.
+pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
+
+pub fn __truncxfhf2(a: f80) callconv(.C) F16T {
+ return @bitCast(F16T, trunc(f16, a));
+}
+
+pub fn __truncxfff2(a: f80) callconv(.C) f32 {
+ return trunc(f32, a);
+}
+
+pub fn __truncxfdf2(a: f80) callconv(.C) f64 {
+ return trunc(f64, a);
+}
+
+inline fn trunc(comptime dst_t: type, a: f80) dst_t {
+ @setRuntimeSafety(builtin.is_test);
+
+ const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
+ const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+ const dst_sig_bits = std.math.floatMantissaBits(dst_t);
+
+ const src_exp_bias = 16383;
+
+ const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
+ const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
+
+ const dst_bits = @typeInfo(dst_t).Float.bits;
+ const dst_exp_bits = dst_bits - dst_sig_bits - 1;
+ const dst_inf_exp = (1 << dst_exp_bits) - 1;
+ const dst_exp_bias = dst_inf_exp >> 1;
+
+ const underflow = src_exp_bias + 1 - dst_exp_bias;
+ const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
+
+ const dst_qnan = 1 << (dst_sig_bits - 1);
+ const dst_nan_mask = dst_qnan - 1;
+
+ // Break a into a sign and representation of the absolute value
+ var a_rep = @ptrCast(*const std.math.F80Repr, &a).*;
+ const sign = a_rep.exp & 0x8000;
+ a_rep.exp &= 0x7FFF;
+ a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
+ var abs_result: dst_rep_t = undefined;
+
+ if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
+ abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits));
+ abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
+
+ const round_bits = a_rep.fraction & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ abs_result += 1;
+ } else if (round_bits == halfway) {
+ // Ties to even
+ abs_result += abs_result & 1;
+ }
+ } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+ abs_result |= dst_qnan;
+ abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
+ } else if (a_rep.exp >= overflow) {
+ // a overflows to infinity.
+ abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+ } else {
+ // a underflows on conversion to the destination type or is an exact
+ // zero. The result may be a denormal or zero. Extract the exponent
+ // to get the shift amount for the denormalization.
+ const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
+
+ // Right shift by the denormalization amount with sticky.
+ if (shift > src_sig_bits) {
+ abs_result = 0;
+ } else {
+ const sticky = @boolToInt(a_rep.fraction << @intCast(u6, shift) != 0);
+ const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky;
+ abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits));
+ const round_bits = denormalized_significand & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ abs_result += 1;
+ } else if (round_bits == halfway) {
+ // Ties to even
+ abs_result += abs_result & 1;
+ }
+ }
+ }
+
+ const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
+ return @bitCast(dst_t, result);
+}
+
+pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
+ const src_sig_bits = std.math.floatMantissaBits(f128);
+ const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const src_bits = @typeInfo(f128).Float.bits;
+ const src_exp_bits = src_bits - src_sig_bits - 1;
+ const src_inf_exp = 0x7FFF;
+
+ const src_inf = src_inf_exp << src_sig_bits;
+ const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
+ const src_abs_mask = src_sign_mask - 1;
+ const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
+ const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
+ const src_qnan = 1 << (src_sig_bits - 1);
+ const src_nan_mask = src_qnan - 1;
+
+ // Break a into a sign and representation of the absolute value
+ const a_rep = @bitCast(u128, a);
+ const a_abs = a_rep & src_abs_mask;
+ const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0;
+
+ var res: std.math.F80Repr align(16) = undefined;
+
+ if (a_abs > src_inf) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ res.exp = 0x7fff;
+ res.fraction = 0x8000000000000000;
+ res.fraction |= @truncate(u64, (a_abs & src_qnan) << (src_sig_bits - dst_sig_bits));
+ res.fraction |= @truncate(u64, (a_abs & src_nan_mask) << (src_sig_bits - dst_sig_bits));
+ } else {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits));
+ res.exp = @truncate(u16, a_abs >> src_sig_bits);
+
+ const round_bits = a_abs & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ const exp = @addWithOverflow(u64, res.fraction, 1, &res.fraction);
+ res.exp += @boolToInt(exp);
+ } else if (round_bits == halfway) {
+ // Ties to even
+ const exp = @addWithOverflow(u64, res.fraction, res.fraction & 1, &res.fraction);
+ res.exp += @boolToInt(exp);
+ }
+ }
+
+ res.exp |= sign;
+ return @ptrCast(*const f80, &res).*;
+}
From 9bbd3ab257137c97f695d187436e14c622f877c8 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Fri, 21 Jan 2022 15:26:43 +0200
Subject: [PATCH 0032/2031] compiler-rt: add comparison functions for f80
---
lib/std/special/compiler_rt.zig | 12 ++++
lib/std/special/compiler_rt/compareXf2.zig | 67 ++++++++++++++++++++++
src/stage1/codegen.cpp | 62 +++++++++++++++++++-
3 files changed, 140 insertions(+), 1 deletion(-)
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index 555a7a49d3..d83e94be8f 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -54,6 +54,8 @@ comptime {
@export(__ledf2, .{ .name = "__ledf2", .linkage = linkage });
const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2;
@export(__letf2, .{ .name = "__letf2", .linkage = linkage });
+ const __lexf2 = @import("compiler_rt/compareXf2.zig").__lexf2;
+ @export(__lexf2, .{ .name = "__lexf2", .linkage = linkage });
const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2;
@export(__gesf2, .{ .name = "__gesf2", .linkage = linkage });
@@ -61,26 +63,36 @@ comptime {
@export(__gedf2, .{ .name = "__gedf2", .linkage = linkage });
const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2;
@export(__getf2, .{ .name = "__getf2", .linkage = linkage });
+ const __gexf2 = @import("compiler_rt/compareXf2.zig").__gexf2;
+ @export(__gexf2, .{ .name = "__gexf2", .linkage = linkage });
const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2;
@export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage });
const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2;
@export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage });
+ const __eqxf2 = @import("compiler_rt/compareXf2.zig").__eqxf2;
+ @export(__eqxf2, .{ .name = "__eqxf2", .linkage = linkage });
const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2;
@export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage });
const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2;
@export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage });
+ const __ltxf2 = @import("compiler_rt/compareXf2.zig").__ltxf2;
+ @export(__ltxf2, .{ .name = "__ltxf2", .linkage = linkage });
const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2;
@export(__nesf2, .{ .name = "__nesf2", .linkage = linkage });
const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2;
@export(__nedf2, .{ .name = "__nedf2", .linkage = linkage });
+ const __nexf2 = @import("compiler_rt/compareXf2.zig").__nexf2;
+ @export(__nexf2, .{ .name = "__nexf2", .linkage = linkage });
const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2;
@export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage });
const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2;
@export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage });
+ const __gtxf2 = @import("compiler_rt/compareXf2.zig").__gtxf2;
+ @export(__gtxf2, .{ .name = "__gtxf2", .linkage = linkage });
if (!is_test) {
@export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage });
diff --git a/lib/std/special/compiler_rt/compareXf2.zig b/lib/std/special/compiler_rt/compareXf2.zig
index 9f3750094e..36f6f5f1c1 100644
--- a/lib/std/special/compiler_rt/compareXf2.zig
+++ b/lib/std/special/compiler_rt/compareXf2.zig
@@ -144,6 +144,73 @@ pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
return __gedf2(a, b);
}
+// Comparison between f80
+
+pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
+ const a_rep = @ptrCast(*const std.math.F80Repr, &a).*;
+ const b_rep = @ptrCast(*const std.math.F80Repr, &b).*;
+ const sig_bits = std.math.floatMantissaBits(f80);
+ const int_bit = 0x8000000000000000;
+ const sign_bit = 0x8000;
+ const special_exp = 0x7FFF;
+
+ // If either a or b is NaN, they are unordered.
+ if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
+ (b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
+ return RT.Unordered;
+
+ // If a and b are both zeros, they are equal.
+ if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
+ return .Equal;
+
+ if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
+ return .Equal;
+ } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
+ // signs are different
+ if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ } else {
+ const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
+ const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
+ if (a_fraction < b_fraction) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ }
+}
+
+pub fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
+ @setRuntimeSafety(builtin.is_test);
+ const float = cmp_f80(LE, a, b);
+ return @bitCast(i32, float);
+}
+
+pub fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
+ @setRuntimeSafety(builtin.is_test);
+ const float = cmp_f80(GE, a, b);
+ return @bitCast(i32, float);
+}
+
+pub fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __lexf2(a, b);
+}
+
+pub fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __lexf2(a, b);
+}
+
+pub fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
+ return __lexf2(a, b);
+}
+
+pub fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __gexf2(a, b);
+}
+
// Comparison between f128
pub fn __letf2(a: f128, b: f128) callconv(.C) i32 {
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index b97f009d62..a62daf0d63 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -3234,6 +3234,49 @@ static LLVMValueRef get_soft_f80_bin_op_func(CodeGen *g, const char *name, int p
return LLVMAddFunction(g->module, name, fn_type);
}
+enum SoftF80Icmp {
+ NONE,
+ EQ_ZERO,
+ NE_ZERO,
+ LE_ZERO,
+ EQ_NEG,
+ GE_ZERO,
+ EQ_ONE,
+};
+
+static LLVMValueRef add_f80_icmp(CodeGen *g, LLVMValueRef val, SoftF80Icmp kind) {
+ switch (kind) {
+ case NONE:
+ return val;
+ case EQ_ZERO: {
+ LLVMValueRef zero = LLVMConstInt(g->builtin_types.entry_i32->llvm_type, 0, true);
+ return LLVMBuildICmp(g->builder, LLVMIntEQ, val, zero, "");
+ }
+ case NE_ZERO: {
+ LLVMValueRef zero = LLVMConstInt(g->builtin_types.entry_i32->llvm_type, 0, true);
+ return LLVMBuildICmp(g->builder, LLVMIntNE, val, zero, "");
+ }
+ case LE_ZERO: {
+ LLVMValueRef zero = LLVMConstInt(g->builtin_types.entry_i32->llvm_type, 0, true);
+ return LLVMBuildICmp(g->builder, LLVMIntSLE, val, zero, "");
+ }
+ case EQ_NEG: {
+ LLVMValueRef zero = LLVMConstInt(g->builtin_types.entry_i32->llvm_type, -1, true);
+ return LLVMBuildICmp(g->builder, LLVMIntEQ, val, zero, "");
+ }
+ case GE_ZERO: {
+ LLVMValueRef zero = LLVMConstInt(g->builtin_types.entry_i32->llvm_type, 0, true);
+ return LLVMBuildICmp(g->builder, LLVMIntSGE, val, zero, "");
+ }
+ case EQ_ONE: {
+ LLVMValueRef zero = LLVMConstInt(g->builtin_types.entry_i32->llvm_type, 1, true);
+ return LLVMBuildICmp(g->builder, LLVMIntEQ, val, zero, "");
+ }
+ default:
+ zig_unreachable();
+ }
+}
+
static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable,
Stage1AirInstBinOp *bin_op_instruction)
{
@@ -3249,6 +3292,7 @@ static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable,
LLVMTypeRef return_type = g->builtin_types.entry_f80->llvm_type;
int param_count = 2;
const char *func_name;
+ SoftF80Icmp res_icmp = NONE;
switch (op_id) {
case IrBinOpInvalid:
case IrBinOpArrayCat:
@@ -3274,20 +3318,32 @@ static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable,
case IrBinOpCmpEq:
return_type = g->builtin_types.entry_i32->llvm_type;
func_name = "__eqxf2";
+ res_icmp = EQ_ZERO;
break;
case IrBinOpCmpNotEq:
return_type = g->builtin_types.entry_i32->llvm_type;
func_name = "__nexf2";
+ res_icmp = NE_ZERO;
break;
case IrBinOpCmpLessOrEq:
+ return_type = g->builtin_types.entry_i32->llvm_type;
+ func_name = "__lexf2";
+ res_icmp = LE_ZERO;
+ break;
case IrBinOpCmpLessThan:
return_type = g->builtin_types.entry_i32->llvm_type;
func_name = "__lexf2";
+ res_icmp = EQ_NEG;
break;
case IrBinOpCmpGreaterOrEq:
+ return_type = g->builtin_types.entry_i32->llvm_type;
+ func_name = "__gexf2";
+ res_icmp = GE_ZERO;
+ break;
case IrBinOpCmpGreaterThan:
return_type = g->builtin_types.entry_i32->llvm_type;
func_name = "__gexf2";
+ res_icmp = EQ_ONE;
break;
case IrBinOpMaximum:
func_name = "__fmaxx";
@@ -3338,8 +3394,11 @@ static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable,
if (vector_len == 0) {
LLVMValueRef params[2] = {op1_value, op2_value};
result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
+ result = add_f80_icmp(g, result, res_icmp);
} else {
- result = build_alloca(g, op1->value->type, "", 0);
+ ZigType *alloca_ty = op1->value->type;
+ if (res_icmp != NONE) alloca_ty = get_vector_type(g, vector_len, g->builtin_types.entry_bool);
+ result = build_alloca(g, alloca_ty, "", 0);
}
LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type;
@@ -3350,6 +3409,7 @@ static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable,
LLVMBuildExtractElement(g->builder, op2_value, index_value, ""),
};
LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
+ call_result = add_f80_icmp(g, call_result, res_icmp);
LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""),
call_result, index_value, "");
}
From 6a736f0c8c187f2fcaeed4b60bf9e54aa719ae02 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Fri, 21 Jan 2022 21:49:02 +0200
Subject: [PATCH 0033/2031] compiler-rt: add add/sub for f80
---
lib/std/special/compiler_rt.zig | 4 +
lib/std/special/compiler_rt/addXf3.zig | 171 +++++++++++++++++++++++++
2 files changed, 175 insertions(+)
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index d83e94be8f..da21745cce 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -237,12 +237,16 @@ comptime {
@export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
@export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
+ const __addxf3 = @import("compiler_rt/addXf3.zig").__addxf3;
+ @export(__addxf3, .{ .name = "__addxf3", .linkage = linkage });
const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3;
@export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3;
@export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
@export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
+ const __subxf3 = @import("compiler_rt/addXf3.zig").__subxf3;
+ @export(__subxf3, .{ .name = "__subxf3", .linkage = linkage });
const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3;
@export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
diff --git a/lib/std/special/compiler_rt/addXf3.zig b/lib/std/special/compiler_rt/addXf3.zig
index 4c74110310..41ff00e95d 100644
--- a/lib/std/special/compiler_rt/addXf3.zig
+++ b/lib/std/special/compiler_rt/addXf3.zig
@@ -225,6 +225,177 @@ fn addXf3(comptime T: type, a: T, b: T) T {
return @bitCast(T, result);
}
+fn normalize_f80(exp: *i32, significand: *u80) void {
+ const shift = @clz(u64, @truncate(u64, significand.*));
+ significand.* = (significand.* << shift);
+ exp.* += -@as(i8, shift);
+}
+
+pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
+ var a_rep align(16) = @ptrCast(*const std.math.F80Repr, &a).*;
+ var b_rep align(16) = @ptrCast(*const std.math.F80Repr, &b).*;
+ var a_exp: i32 = a_rep.exp & 0x7FFF;
+ var b_exp: i32 = b_rep.exp & 0x7FFF;
+
+ const significand_bits = std.math.floatMantissaBits(f80);
+ const int_bit = 0x8000000000000000;
+ const significand_mask = 0x7FFFFFFFFFFFFFFF;
+ const qnan_bit = 0xC000000000000000;
+ const max_exp = 0x7FFF;
+ const sign_bit = 0x8000;
+
+ // Detect if a or b is infinity, or NaN.
+ if (a_exp == max_exp) {
+ if (a_rep.fraction ^ int_bit == 0) {
+ if (b_exp == max_exp and (b_rep.fraction ^ int_bit == 0)) {
+ // +/-infinity + -/+infinity = qNaN
+ return std.math.qnan_f80;
+ }
+ // +/-infinity + anything = +/- infinity
+ return a;
+ } else {
+ std.debug.assert(a_rep.fraction & significand_mask != 0);
+ // NaN + anything = qNaN
+ a_rep.fraction |= qnan_bit;
+ return @ptrCast(*const f80, &a_rep).*;
+ }
+ }
+ if (b_exp == max_exp) {
+ if (b_rep.fraction ^ int_bit == 0) {
+ // anything + +/-infinity = +/-infinity
+ return b;
+ } else {
+ std.debug.assert(b_rep.fraction & significand_mask != 0);
+ // anything + NaN = qNaN
+ b_rep.fraction |= qnan_bit;
+ return @ptrCast(*const f80, &b_rep).*;
+ }
+ }
+
+ const a_zero = (a_rep.fraction | @bitCast(u32, a_exp)) == 0;
+ const b_zero = (b_rep.fraction | @bitCast(u32, b_exp)) == 0;
+ if (a_zero) {
+ // zero + anything = anything
+ if (b_zero) {
+ // but we need to get the sign right for zero + zero
+ a_rep.exp &= b_rep.exp;
+ return @ptrCast(*const f80, &a_rep).*;
+ } else {
+ return b;
+ }
+ } else if (b_zero) {
+ // anything + zero = anything
+ return a;
+ }
+
+ var a_int: u80 = a_rep.fraction | (@as(u80, a_rep.exp & max_exp) << significand_bits);
+ var b_int: u80 = b_rep.fraction | (@as(u80, b_rep.exp & max_exp) << significand_bits);
+
+ // Swap a and b if necessary so that a has the larger absolute value.
+ if (b_int > a_int) {
+ const temp = a_rep;
+ a_rep = b_rep;
+ b_rep = temp;
+ }
+
+ // Extract the exponent and significand from the (possibly swapped) a and b.
+ a_exp = a_rep.exp & max_exp;
+ b_exp = b_rep.exp & max_exp;
+ a_int = a_rep.fraction;
+ b_int = b_rep.fraction;
+
+ // Normalize any denormals, and adjust the exponent accordingly.
+ normalize_f80(&a_exp, &a_int);
+ normalize_f80(&b_exp, &b_int);
+
+ // The sign of the result is the sign of the larger operand, a. If they
+ // have opposite signs, we are performing a subtraction; otherwise addition.
+ const result_sign = a_rep.exp & sign_bit;
+ const subtraction = (a_rep.exp ^ b_rep.exp) & sign_bit != 0;
+
+ // Shift the significands to give us round, guard and sticky, and or in the
+ // implicit significand bit. (If we fell through from the denormal path it
+ // was already set by normalize(), but setting it twice won't hurt
+ // anything.)
+ a_int = a_int << 3;
+ b_int = b_int << 3;
+
+ // Shift the significand of b by the difference in exponents, with a sticky
+ // bottom bit to get rounding correct.
+ const @"align" = @intCast(u80, a_exp - b_exp);
+ if (@"align" != 0) {
+ if (@"align" < 80) {
+ const sticky = if (b_int << @intCast(u7, 80 - @"align") != 0) @as(u80, 1) else 0;
+ b_int = (b_int >> @truncate(u7, @"align")) | sticky;
+ } else {
+ b_int = 1; // sticky; b is known to be non-zero.
+ }
+ }
+ if (subtraction) {
+ a_int -= b_int;
+ // If a == -b, return +zero.
+ if (a_int == 0) return 0.0;
+
+ // If partial cancellation occurred, we need to left-shift the result
+ // and adjust the exponent:
+ if (a_int < int_bit << 3) {
+ const shift = @intCast(i32, @clz(u80, a_int)) - @intCast(i32, @clz(u80, int_bit << 3));
+ a_int <<= @intCast(u7, shift);
+ a_exp -= shift;
+ }
+ } else { // addition
+ a_int += b_int;
+
+ // If the addition carried up, we need to right-shift the result and
+ // adjust the exponent:
+ if (a_int & (int_bit << 4) != 0) {
+ const sticky = a_int & 1;
+ a_int = a_int >> 1 | sticky;
+ a_exp += 1;
+ }
+ }
+
+ // If we have overflowed the type, return +/- infinity:
+ if (a_exp >= max_exp) {
+ a_rep.exp = max_exp | result_sign;
+ a_rep.fraction = int_bit; // integer bit is set for +/-inf
+ return @ptrCast(*const f80, &a_rep).*;
+ }
+
+ if (a_exp <= 0) {
+ // Result is denormal before rounding; the exponent is zero and we
+ // need to shift the significand.
+ const shift = @intCast(u80, 1 - a_exp);
+ const sticky = if (a_int << @intCast(u7, 80 - shift) != 0) @as(u1, 1) else 0;
+ a_int = a_int >> @intCast(u7, shift | sticky);
+ a_exp = 0;
+ }
+
+ // Low three bits are round, guard, and sticky.
+ const round_guard_sticky = @truncate(u3, a_int);
+
+ // Shift the significand into place.
+ a_int = @truncate(u64, a_int >> 3);
+
+ // Insert the exponent and sign.
+ a_int |= (@intCast(u80, a_exp) | result_sign) << significand_bits;
+
+ // Final rounding. The result may overflow to infinity, but that is the
+ // correct result in that case.
+ if (round_guard_sticky > 0x4) a_int += 1;
+ if (round_guard_sticky == 0x4) a_int += a_int & 1;
+
+ a_rep.fraction = @truncate(u64, a_int);
+ a_rep.exp = @truncate(u16, a_int >> significand_bits);
+ return @ptrCast(*const f80, &a_rep).*;
+}
+
+pub fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
+ var b_rep align(16) = @ptrCast(*const std.math.F80Repr, &b).*;
+ b_rep.exp ^= 0x8000;
+ return __addxf3(a, @ptrCast(*const f80, &b_rep).*);
+}
+
test {
_ = @import("addXf3_test.zig");
}
From b2f84c6714c30589c35fce72bab530cef4b05eca Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sat, 22 Jan 2022 14:07:15 +0200
Subject: [PATCH 0034/2031] stage1: implement f80 negation on non native
targets
---
src/stage1/codegen.cpp | 35 +++++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index a62daf0d63..1b11f32397 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -4106,12 +4106,47 @@ static LLVMValueRef ir_render_binary_not(CodeGen *g, Stage1Air *executable,
return LLVMBuildNot(g->builder, operand, "");
}
+static LLVMValueRef ir_gen_soft_f80_neg(CodeGen *g, ZigType *op_type, LLVMValueRef operand) {
+ uint32_t vector_len = op_type->id == ZigTypeIdVector ? op_type->data.vector.len : 0;
+
+ uint64_t buf[2] = {0, 0};
+ if (g->is_big_endian != native_is_big_endian) {
+ buf[1] = 0x8000000000000000;
+ } else {
+ buf[1] = 0x8000;
+ }
+ LLVMValueRef sign_mask = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf);
+
+ LLVMValueRef result;
+ if (vector_len == 0) {
+ result = LLVMBuildXor(g->builder, operand, sign_mask, "");
+ } else {
+ result = build_alloca(g, op_type, "", 0);
+ }
+
+ LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type;
+ for (uint32_t i = 0; i < vector_len; i++) {
+ LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false);
+ LLVMValueRef xor_operand = LLVMBuildExtractElement(g->builder, operand, index_value, "");
+ LLVMValueRef xor_result = LLVMBuildXor(g->builder, xor_operand, sign_mask, "");
+ LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""),
+ xor_result, index_value, "");
+ }
+ if (vector_len != 0) {
+ result = LLVMBuildLoad(g->builder, result, "");
+ }
+ return result;
+}
+
static LLVMValueRef ir_gen_negation(CodeGen *g, Stage1AirInst *inst, Stage1AirInst *operand, bool wrapping) {
LLVMValueRef llvm_operand = ir_llvm_value(g, operand);
ZigType *operand_type = operand->value->type;
ZigType *scalar_type = (operand_type->id == ZigTypeIdVector) ?
operand_type->data.vector.elem_type : operand_type;
+ if (scalar_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target))
+ return ir_gen_soft_f80_neg(g, operand_type, llvm_operand);
+
if (scalar_type->id == ZigTypeIdFloat) {
ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, inst));
return LLVMBuildFNeg(g->builder, llvm_operand, "");
From 5a7d43df23664385d6841ef98b17ff9447be1ec6 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sat, 29 Jan 2022 17:30:18 +0200
Subject: [PATCH 0035/2031] stage1: make f80 always size 16, align 16
---
lib/std/math.zig | 2 ++
src/stage1/codegen.cpp | 15 +++++++++------
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 43ad49889d..6802d420fd 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -46,8 +46,10 @@ pub const f128_toint = 1.0 / f128_epsilon;
pub const F80Repr = if (@import("builtin").cpu.arch.endian() == .Little) extern struct {
fraction: u64,
exp: u16,
+ _pad: u32 = undefined,
} else extern struct {
exp: u16,
+ _pad: u32 = undefined, // TODO verify compatibility with hardware
fraction: u64,
};
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 1b11f32397..96576f1721 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -8197,6 +8197,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n
buf[1] = tmp;
#endif
LLVMValueRef as_i128 = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf);
+ if (!target_has_f80(g->zig_target)) return as_i128;
LLVMValueRef as_int = LLVMConstTrunc(as_i128, LLVMIntType(80));
return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry));
}
@@ -9420,13 +9421,15 @@ static void define_builtin_types(CodeGen *g) {
add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64);
add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128);
- if (target_has_f80(g->zig_target)) {
- add_fp_entry(g, "f80", 80, LLVMX86FP80Type(), &g->builtin_types.entry_f80);
- } else {
+ {
ZigType *entry = new_type_table_entry(ZigTypeIdFloat);
- entry->llvm_type = get_int_type(g, false, 128)->llvm_type;
- entry->size_in_bits = 8 * LLVMStoreSizeOfType(g->target_data_ref, entry->llvm_type);
- entry->abi_size = LLVMABISizeOfType(g->target_data_ref, entry->llvm_type);
+ if (target_has_f80(g->zig_target)) {
+ entry->llvm_type = LLVMX86FP80Type();
+ } else {
+ entry->llvm_type = get_int_type(g, false, 128)->llvm_type;
+ }
+ entry->size_in_bits = 8 * 16;
+ entry->abi_size = 16;
entry->abi_align = 16;
buf_init_from_str(&entry->name, "f80");
entry->data.floating.bit_count = 80;
From 01d48e55a5aa683828dcb88fee2d811c8262d3e9 Mon Sep 17 00:00:00 2001
From: Jan Philipp Hafer
Date: Sat, 5 Feb 2022 03:32:29 +0100
Subject: [PATCH 0036/2031] compiler_rt: optimize mulo
- use usize to decide if register size is big enough to store
multiplication result or if division is necessary
- multiplication routine with check of integer bounds
- wrapping multiplication and division routine from Hacker's Delight
---
lib/std/special/compiler_rt/mulo.zig | 97 ++++++++++++++--------------
1 file changed, 49 insertions(+), 48 deletions(-)
diff --git a/lib/std/special/compiler_rt/mulo.zig b/lib/std/special/compiler_rt/mulo.zig
index 9fa5d3830b..df4c98134c 100644
--- a/lib/std/special/compiler_rt/mulo.zig
+++ b/lib/std/special/compiler_rt/mulo.zig
@@ -1,67 +1,68 @@
const builtin = @import("builtin");
+const std = @import("std");
+const math = std.math;
// mulo - multiplication overflow
-// - muloXi4_generic for unoptimized version
+// * return a*b.
+// * return if a*b overflows => 1 else => 0
+// - muloXi4_genericSmall as default
+// - muloXi4_genericFast for 2*bitsize <= usize
-// return a*b.
-// return if a*b overflows => 1 else => 0
-// see https://stackoverflow.com/a/26320664 for possible implementations
-
-inline fn muloXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+inline fn muloXi4_genericSmall(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
@setRuntimeSafety(builtin.is_test);
- const BSIZE = @bitSizeOf(ST);
- comptime var UT = switch (ST) {
- i32 => u32,
- i64 => u64,
- i128 => u128,
+ overflow.* = 0;
+ const min = math.minInt(ST);
+ var res: ST = a *% b;
+ // Hacker's Delight section Overflow subsection Multiplication
+ // case a=-2^{31}, b=-1 problem, because
+ // on some machines a*b = -2^{31} with overflow
+ // Then -2^{31}/-1 overflows and any result is possible.
+ // => check with a<0 and b=-2^{31}
+ if ((a < 0 and b == min) or (a != 0 and @divTrunc(res, a) != b))
+ overflow.* = 1;
+ return res;
+}
+
+inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+ @setRuntimeSafety(builtin.is_test);
+ overflow.* = 0;
+ const EST = switch (ST) {
+ i32 => i64,
+ i64 => i128,
+ i128 => i256,
else => unreachable,
};
- const min = @bitCast(ST, @as(UT, 1 << (BSIZE - 1)));
- const max = ~min;
- overflow.* = 0;
- const result = a *% b;
-
- // edge cases
- if (a == min) {
- if (b != 0 and b != 1) overflow.* = 1;
- return result;
- }
- if (b == min) {
- if (a != 0 and a != 1) overflow.* = 1;
- return result;
- }
-
- // take sign of x sx
- const sa = a >> (BSIZE - 1);
- const sb = b >> (BSIZE - 1);
- // take absolute value of a and b via
- // abs(x) = (x^sx)) - sx
- const abs_a = (a ^ sa) -% sa;
- const abs_b = (b ^ sb) -% sb;
-
- // unitary magnitude, cannot have overflow
- if (abs_a < 2 or abs_b < 2) return result;
-
- // compare the signs of operands
- if ((a ^ b) >> (BSIZE - 1) != 0) {
- if (abs_a > @divTrunc(max, abs_b)) overflow.* = 1;
- } else {
- if (abs_a > @divTrunc(min, -abs_b)) overflow.* = 1;
- }
-
- return result;
+ const min = math.minInt(ST);
+ const max = math.maxInt(ST);
+ var res: EST = @as(EST, a) * @as(EST, b);
+ //invariant: -2^{bitwidth(EST)} < res < 2^{bitwidth(EST)-1}
+ if (res < min or max < res)
+ overflow.* = 1;
+ return @truncate(ST, res);
}
pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
- return muloXi4_generic(i32, a, b, overflow);
+ if (2 * @bitSizeOf(i32) <= @bitSizeOf(usize)) {
+ return muloXi4_genericFast(i32, a, b, overflow);
+ } else {
+ return muloXi4_genericSmall(i32, a, b, overflow);
+ }
}
pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
- return muloXi4_generic(i64, a, b, overflow);
+ if (2 * @bitSizeOf(i64) <= @bitSizeOf(usize)) {
+ return muloXi4_genericFast(i64, a, b, overflow);
+ } else {
+ return muloXi4_genericSmall(i64, a, b, overflow);
+ }
}
pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
- return muloXi4_generic(i128, a, b, overflow);
+ if (2 * @bitSizeOf(i128) <= @bitSizeOf(usize)) {
+ return muloXi4_genericFast(i128, a, b, overflow);
+ } else {
+ return muloXi4_genericSmall(i128, a, b, overflow);
+ }
}
test {
From 7d04ab1f14269b25a7ac03c3f787b3f3ee3453c3 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Fri, 4 Feb 2022 19:55:32 +0200
Subject: [PATCH 0037/2031] std.process: add option to support single quotes to
ArgIteratorGeneral
---
lib/std/process.zig | 62 +++++++++++++++++++++++++++++----------------
src/main.zig | 2 +-
2 files changed, 41 insertions(+), 23 deletions(-)
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 96bfed3718..699c994abf 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -303,7 +303,8 @@ pub const ArgIteratorWasi = struct {
/// Optional parameters for `ArgIteratorGeneral`
pub const ArgIteratorGeneralOptions = struct {
- comments_supported: bool = false,
+ comments: bool = false,
+ single_quotes: bool = false,
};
/// A general Iterator to parse a string into a set of arguments
@@ -387,7 +388,7 @@ pub fn ArgIteratorGeneral(comptime options: ArgIteratorGeneralOptions) type {
0 => return false,
' ', '\t', '\r', '\n' => continue,
'#' => {
- if (options.comments_supported) {
+ if (options.comments) {
while (true) : (self.index += 1) {
switch (self.cmd_line[self.index]) {
'\n' => break,
@@ -417,7 +418,11 @@ pub fn ArgIteratorGeneral(comptime options: ArgIteratorGeneralOptions) type {
const character = if (self.index != self.cmd_line.len) self.cmd_line[self.index] else 0;
switch (character) {
0 => return true,
- '"' => {
+ '"', '\'' => {
+ if (!options.single_quotes and character == '\'') {
+ backslash_count = 0;
+ continue;
+ }
const quote_is_real = backslash_count % 2 == 0;
if (quote_is_real) {
in_quote = !in_quote;
@@ -460,7 +465,13 @@ pub fn ArgIteratorGeneral(comptime options: ArgIteratorGeneralOptions) type {
self.start = self.end;
return token;
},
- '"' => {
+ '"', '\'' => {
+ if (!options.single_quotes and character == '\'') {
+ self.emitBackslashes(backslash_count);
+ backslash_count = 0;
+ self.emitCharacter(character);
+ continue;
+ }
const quote_is_real = backslash_count % 2 == 0;
self.emitBackslashes(backslash_count / 2);
backslash_count = 0;
@@ -522,7 +533,7 @@ pub fn ArgIteratorGeneral(comptime options: ArgIteratorGeneralOptions) type {
/// Cross-platform command line argument iterator.
pub const ArgIterator = struct {
const InnerType = switch (builtin.os.tag) {
- .windows => ArgIteratorGeneral(.{ .comments_supported = false }),
+ .windows => ArgIteratorGeneral(.{}),
.wasi => if (builtin.link_libc) ArgIteratorPosix else ArgIteratorWasi,
else => ArgIteratorPosix,
};
@@ -664,27 +675,30 @@ pub fn argsFree(allocator: mem.Allocator, args_alloc: []const [:0]u8) void {
}
test "general arg parsing" {
- try testGeneralCmdLine("a b\tc d", &[_][]const u8{ "a", "b", "c", "d" });
- try testGeneralCmdLine("\"abc\" d e", &[_][]const u8{ "abc", "d", "e" });
- try testGeneralCmdLine("a\\\\\\b d\"e f\"g h", &[_][]const u8{ "a\\\\\\b", "de fg", "h" });
- try testGeneralCmdLine("a\\\\\\\"b c d", &[_][]const u8{ "a\\\"b", "c", "d" });
- try testGeneralCmdLine("a\\\\\\\\\"b c\" d e", &[_][]const u8{ "a\\\\b c", "d", "e" });
- try testGeneralCmdLine("a b\tc \"d f", &[_][]const u8{ "a", "b", "c", "d f" });
- try testGeneralCmdLine("j k l\\", &[_][]const u8{ "j", "k", "l\\" });
- try testGeneralCmdLine("\"\" x y z\\\\", &[_][]const u8{ "", "x", "y", "z\\\\" });
+ try testGeneralCmdLine("a b\tc d", &.{ "a", "b", "c", "d" });
+ try testGeneralCmdLine("\"abc\" d e", &.{ "abc", "d", "e" });
+ try testGeneralCmdLine("a\\\\\\b d\"e f\"g h", &.{ "a\\\\\\b", "de fg", "h" });
+ try testGeneralCmdLine("a\\\\\\\"b c d", &.{ "a\\\"b", "c", "d" });
+ try testGeneralCmdLine("a\\\\\\\\\"b c\" d e", &.{ "a\\\\b c", "d", "e" });
+ try testGeneralCmdLine("a b\tc \"d f", &.{ "a", "b", "c", "d f" });
+ try testGeneralCmdLine("j k l\\", &.{ "j", "k", "l\\" });
+ try testGeneralCmdLine("\"\" x y z\\\\", &.{ "", "x", "y", "z\\\\" });
- try testGeneralCmdLine("\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\"", &[_][]const u8{
+ try testGeneralCmdLine("\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\"", &.{
".\\..\\zig-cache\\build",
"bin\\zig.exe",
".\\..",
".\\..\\zig-cache",
"--help",
});
+
+ try testGeneralCmdLine(
+ \\ 'foo' "bar"
+ , &.{ "'foo'", "bar" });
}
fn testGeneralCmdLine(input_cmd_line: []const u8, expected_args: []const []const u8) !void {
- var it = try ArgIteratorGeneral(.{ .comments_supported = false })
- .init(std.testing.allocator, input_cmd_line);
+ var it = try ArgIteratorGeneral(.{}).init(std.testing.allocator, input_cmd_line);
defer it.deinit();
for (expected_args) |expected_arg| {
const arg = it.next().?;
@@ -697,30 +711,34 @@ test "response file arg parsing" {
try testResponseFileCmdLine(
\\a b
\\c d\
- , &[_][]const u8{ "a", "b", "c", "d\\" });
- try testResponseFileCmdLine("a b c d\\", &[_][]const u8{ "a", "b", "c", "d\\" });
+ , &.{ "a", "b", "c", "d\\" });
+ try testResponseFileCmdLine("a b c d\\", &.{ "a", "b", "c", "d\\" });
try testResponseFileCmdLine(
\\j
\\ k l # this is a comment \\ \\\ \\\\ "none" "\\" "\\\"
\\ "m" #another comment
\\
- , &[_][]const u8{ "j", "k", "l", "m" });
+ , &.{ "j", "k", "l", "m" });
try testResponseFileCmdLine(
\\ "" q ""
\\ "r s # t" "u\" v" #another comment
\\
- , &[_][]const u8{ "", "q", "", "r s # t", "u\" v" });
+ , &.{ "", "q", "", "r s # t", "u\" v" });
try testResponseFileCmdLine(
\\ -l"advapi32" a# b#c d#
\\e\\\
- , &[_][]const u8{ "-ladvapi32", "a#", "b#c", "d#", "e\\\\\\" });
+ , &.{ "-ladvapi32", "a#", "b#c", "d#", "e\\\\\\" });
+
+ try testResponseFileCmdLine(
+ \\ 'foo' "bar"
+ , &.{ "foo", "bar" });
}
fn testResponseFileCmdLine(input_cmd_line: []const u8, expected_args: []const []const u8) !void {
- var it = try ArgIteratorGeneral(.{ .comments_supported = true })
+ var it = try ArgIteratorGeneral(.{ .comments = true, .single_quotes = true })
.init(std.testing.allocator, input_cmd_line);
defer it.deinit();
for (expected_args) |expected_arg| {
diff --git a/src/main.zig b/src/main.zig
index bd25c5d234..bbdb948c90 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4230,7 +4230,7 @@ pub const ClangArgIterator = struct {
};
}
- const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments_supported = true });
+ const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true, .single_quotes = true });
/// Initialize the arguments from a Response File. "*.rsp"
fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile {
From fbc06f9c9151205896fb167b087506d6580946c4 Mon Sep 17 00:00:00 2001
From: rohlem
Date: Fri, 4 Feb 2022 19:44:38 +0100
Subject: [PATCH 0038/2031] std.build.TranslateCStep: add C macro support
The string construction code is moved out of std.build.LibExeObjStep
into std.build.constructCMacroArg, to allow reusing it elsewhere.
---
lib/std/build.zig | 26 +++++++++++++++++---------
lib/std/build/TranslateCStep.zig | 19 +++++++++++++++++++
2 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/lib/std/build.zig b/lib/std/build.zig
index 395ccc5cb4..104aceea76 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -1862,15 +1862,7 @@ pub const LibExeObjStep = struct {
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
pub fn defineCMacro(self: *LibExeObjStep, name: []const u8, value: ?[]const u8) void {
- var macro = self.builder.allocator.alloc(
- u8,
- name.len + if (value) |value_slice| value_slice.len + 1 else 0,
- ) catch |err| if (err == error.OutOfMemory) @panic("Out of memory") else unreachable;
- mem.copy(u8, macro, name);
- if (value) |value_slice| {
- macro[name.len] = '=';
- mem.copy(u8, macro[name.len + 1 ..], value_slice);
- }
+ const macro = constructCMacro(self.builder.allocator, name, value);
self.c_macros.append(macro) catch unreachable;
}
@@ -2934,6 +2926,22 @@ pub const LibExeObjStep = struct {
}
};
+/// Allocates a new string for assigning a value to a named macro.
+/// If the value is omitted, it is set to 1.
+/// `name` and `value` need not live longer than the function call.
+pub fn constructCMacro(allocator: Allocator, name: []const u8, value: ?[]const u8) []const u8 {
+ var macro = allocator.alloc(
+ u8,
+ name.len + if (value) |value_slice| value_slice.len + 1 else 0,
+ ) catch |err| if (err == error.OutOfMemory) @panic("Out of memory") else unreachable;
+ mem.copy(u8, macro, name);
+ if (value) |value_slice| {
+ macro[name.len] = '=';
+ mem.copy(u8, macro[name.len + 1 ..], value_slice);
+ }
+ return macro;
+}
+
pub const InstallArtifactStep = struct {
pub const base_id = .install_artifact;
diff --git a/lib/std/build/TranslateCStep.zig b/lib/std/build/TranslateCStep.zig
index 0d44ebd80a..1f9bee463c 100644
--- a/lib/std/build/TranslateCStep.zig
+++ b/lib/std/build/TranslateCStep.zig
@@ -16,6 +16,7 @@ step: Step,
builder: *Builder,
source: build.FileSource,
include_dirs: std.ArrayList([]const u8),
+c_macros: std.ArrayList([]const u8),
output_dir: ?[]const u8,
out_basename: []const u8,
target: CrossTarget = CrossTarget{},
@@ -28,6 +29,7 @@ pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep {
.builder = builder,
.source = source,
.include_dirs = std.ArrayList([]const u8).init(builder.allocator),
+ .c_macros = std.ArrayList([]const u8).init(builder.allocator),
.output_dir = null,
.out_basename = undefined,
.output_file = build.GeneratedFile{ .step = &self.step },
@@ -53,6 +55,18 @@ pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8)
return CheckFileStep.create(self.builder, .{ .generated = &self.output_file }, self.builder.dupeStrings(expected_matches));
}
+/// If the value is omitted, it is set to 1.
+/// `name` and `value` need not live longer than the function call.
+pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void {
+ const macro = build.constructCMacro(self.builder.allocator, name, value);
+ self.c_macros.append(macro) catch unreachable;
+}
+
+/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
+pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void {
+ self.c_macros.append(self.builder.dupe(name_and_value)) catch unreachable;
+}
+
fn make(step: *Step) !void {
const self = @fieldParentPtr(TranslateCStep, "step", step);
@@ -73,6 +87,11 @@ fn make(step: *Step) !void {
try argv_list.append(include_dir);
}
+ for (self.c_macros.items) |c_macro| {
+ try argv_list.append("-D");
+ try argv_list.append(c_macro);
+ }
+
try argv_list.append(self.source.getPath(self.builder));
const output_path_nl = try self.builder.execFromStep(argv_list.items, &self.step);
From 0e1afb4d986c3316c6f024be50612b0a6c66777b Mon Sep 17 00:00:00 2001
From: gwenzek
Date: Sat, 5 Feb 2022 15:33:00 +0100
Subject: [PATCH 0039/2031] stage2: add support for Nvptx target
sample command:
/home/guw/github/zig/stage2/bin/zig build-obj cuda_kernel.zig -target nvptx64-cuda -O ReleaseSafe
this will create a kernel.ptx
expose PtxKernel call convention from LLVM
kernels are `export fn f() callconv(.PtxKernel)`
---
lib/std/builtin.zig | 1 +
lib/std/target.zig | 4 ++
lib/std/zig.zig | 1 +
src/Module.zig | 6 +-
src/Sema.zig | 1 +
src/codegen/llvm.zig | 6 +-
src/link.zig | 30 ++++++++--
src/link/NvPtx.zig | 122 +++++++++++++++++++++++++++++++++++++++
src/stage1/all_types.hpp | 3 +-
src/stage1/analyze.cpp | 12 ++++
src/stage1/codegen.cpp | 6 ++
src/stage1/ir.cpp | 1 +
12 files changed, 185 insertions(+), 8 deletions(-)
create mode 100644 src/link/NvPtx.zig
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 9e7bfc99ba..39e849fb5e 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -147,6 +147,7 @@ pub const CallingConvention = enum {
AAPCS,
AAPCSVFP,
SysV,
+ PtxKernel,
};
/// This data structure is used by the Zig language code generation and
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 182690484e..9a2dcfcc66 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -579,6 +579,8 @@ pub const Target = struct {
raw,
/// Plan 9 from Bell Labs
plan9,
+ /// Nvidia PTX format
+ nvptx,
pub fn fileExt(of: ObjectFormat, cpu_arch: Cpu.Arch) [:0]const u8 {
return switch (of) {
@@ -589,6 +591,7 @@ pub const Target = struct {
.hex => ".ihex",
.raw => ".bin",
.plan9 => plan9Ext(cpu_arch),
+ .nvptx => ".ptx",
};
}
};
@@ -1388,6 +1391,7 @@ pub const Target = struct {
else => return switch (cpu_arch) {
.wasm32, .wasm64 => .wasm,
.spirv32, .spirv64 => .spirv,
+ .nvptx, .nvptx64 => .nvptx,
else => .elf,
},
};
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index 1420db8ec2..9b8e2294f2 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -181,6 +181,7 @@ pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error
.Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, ofmt.fileExt(target.cpu.arch) }),
.Lib => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{ target.libPrefix(), root_name }),
},
+ .nvptx => return std.fmt.allocPrint(allocator, "{s}", .{root_name}),
}
}
diff --git a/src/Module.zig b/src/Module.zig
index e2e2505927..eeed6b2dc9 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -4242,7 +4242,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
// in `Decl` to notice that the line number did not change.
mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
},
- .c, .wasm, .spirv => {},
+ .c, .wasm, .spirv, .nvptx => {},
}
}
}
@@ -4316,6 +4316,7 @@ pub fn clearDecl(
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
+ .nvptx => .{ .nvptx = {} },
};
decl.fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
@@ -4325,6 +4326,7 @@ pub fn clearDecl(
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
+ .nvptx => .{ .nvptx = .{} },
};
}
if (decl.getInnerNamespace()) |namespace| {
@@ -4652,6 +4654,7 @@ pub fn allocateNewDecl(
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
+ .nvptx => .{ .nvptx = {} },
},
.fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
@@ -4661,6 +4664,7 @@ pub fn allocateNewDecl(
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
+ .nvptx => .{ .nvptx = .{} },
},
.generation = 0,
.is_pub = false,
diff --git a/src/Sema.zig b/src/Sema.zig
index c4b3ad8c33..934fa4064b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3724,6 +3724,7 @@ pub fn analyzeExport(
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
.spirv => .{ .spirv = {} },
+ .nvptx => .{ .nvptx = {} },
},
.owner_decl = owner_decl,
.src_decl = src_decl,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 81742d4866..08fc3879a9 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -378,7 +378,7 @@ pub const Object = struct {
const mod = comp.bin_file.options.module.?;
const cache_dir = mod.zig_cache_artifact_directory;
- const emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit) |emit|
+ var emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit) |emit|
try emit.basenamePath(arena, try arena.dupeZ(u8, comp.bin_file.intermediary_basename.?))
else
null;
@@ -5078,6 +5078,10 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
},
.Signal => .AVR_SIGNAL,
.SysV => .X86_64_SysV,
+ .PtxKernel => return switch (target.cpu.arch) {
+ .nvptx, .nvptx64 => .PTX_Kernel,
+ else => unreachable,
+ },
};
}
diff --git a/src/link.zig b/src/link.zig
index 883d79de34..51e7082aa7 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -215,6 +215,7 @@ pub const File = struct {
c: void,
wasm: Wasm.DeclBlock,
spirv: void,
+ nvptx: void,
};
pub const LinkFn = union {
@@ -225,6 +226,7 @@ pub const File = struct {
c: void,
wasm: Wasm.FnData,
spirv: SpirV.FnData,
+ nvptx: void,
};
pub const Export = union {
@@ -235,6 +237,7 @@ pub const File = struct {
c: void,
wasm: void,
spirv: void,
+ nvptx: void,
};
/// For DWARF .debug_info.
@@ -274,6 +277,7 @@ pub const File = struct {
.plan9 => return &(try Plan9.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
.spirv => &(try SpirV.createEmpty(allocator, options)).base,
+ .nvptx => &(try NvPtx.createEmpty(allocator, options)).base,
.hex => return error.HexObjectFormatUnimplemented,
.raw => return error.RawObjectFormatUnimplemented,
};
@@ -292,6 +296,7 @@ pub const File = struct {
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
.spirv => &(try SpirV.createEmpty(allocator, options)).base,
+ .nvptx => &(try NvPtx.createEmpty(allocator, options)).base,
.hex => return error.HexObjectFormatUnimplemented,
.raw => return error.RawObjectFormatUnimplemented,
};
@@ -312,6 +317,7 @@ pub const File = struct {
.wasm => &(try Wasm.openPath(allocator, sub_path, options)).base,
.c => &(try C.openPath(allocator, sub_path, options)).base,
.spirv => &(try SpirV.openPath(allocator, sub_path, options)).base,
+ .nvptx => &(try NvPtx.openPath(allocator, sub_path, options)).base,
.hex => return error.HexObjectFormatUnimplemented,
.raw => return error.RawObjectFormatUnimplemented,
};
@@ -344,7 +350,7 @@ pub const File = struct {
.mode = determineMode(base.options),
});
},
- .c, .wasm, .spirv => {},
+ .c, .wasm, .spirv, .nvptx => {},
}
}
@@ -389,7 +395,7 @@ pub const File = struct {
f.close();
base.file = null;
},
- .c, .wasm, .spirv => {},
+ .c, .wasm, .spirv, .nvptx => {},
}
}
@@ -437,6 +443,7 @@ pub const File = struct {
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl),
+ .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDecl(module, decl),
// zig fmt: on
}
}
@@ -456,6 +463,7 @@ pub const File = struct {
.wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness),
+ .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func, air, liveness),
// zig fmt: on
}
}
@@ -471,7 +479,7 @@ pub const File = struct {
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl),
.plan9 => @panic("TODO: implement updateDeclLineNumber for plan9"),
- .wasm, .spirv => {},
+ .wasm, .spirv, .nvptx => {},
}
}
@@ -493,7 +501,7 @@ pub const File = struct {
},
.wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl),
- .c, .spirv => {},
+ .c, .spirv, .nvptx => {},
}
}
@@ -551,6 +559,11 @@ pub const File = struct {
parent.deinit();
base.allocator.destroy(parent);
},
+ .nvptx => {
+ const parent = @fieldParentPtr(NvPtx, "base", base);
+ parent.deinit();
+ base.allocator.destroy(parent);
+ },
}
}
@@ -584,6 +597,7 @@ pub const File = struct {
.wasm => return @fieldParentPtr(Wasm, "base", base).flush(comp),
.spirv => return @fieldParentPtr(SpirV, "base", base).flush(comp),
.plan9 => return @fieldParentPtr(Plan9, "base", base).flush(comp),
+ .nvptx => return @fieldParentPtr(NvPtx, "base", base).flush(comp),
}
}
@@ -598,6 +612,7 @@ pub const File = struct {
.wasm => return @fieldParentPtr(Wasm, "base", base).flushModule(comp),
.spirv => return @fieldParentPtr(SpirV, "base", base).flushModule(comp),
.plan9 => return @fieldParentPtr(Plan9, "base", base).flushModule(comp),
+ .nvptx => return @fieldParentPtr(NvPtx, "base", base).flushModule(comp),
}
}
@@ -612,6 +627,7 @@ pub const File = struct {
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
.spirv => @fieldParentPtr(SpirV, "base", base).freeDecl(decl),
.plan9 => @fieldParentPtr(Plan9, "base", base).freeDecl(decl),
+ .nvptx => @fieldParentPtr(NvPtx, "base", base).freeDecl(decl),
}
}
@@ -622,7 +638,7 @@ pub const File = struct {
.macho => return @fieldParentPtr(MachO, "base", base).error_flags,
.plan9 => return @fieldParentPtr(Plan9, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
- .wasm, .spirv => return ErrorFlags{},
+ .wasm, .spirv, .nvptx => return ErrorFlags{},
}
}
@@ -644,6 +660,7 @@ pub const File = struct {
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateDeclExports(module, decl, exports),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclExports(module, decl, exports),
+ .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDeclExports(module, decl, exports),
}
}
@@ -656,6 +673,7 @@ pub const File = struct {
.c => unreachable,
.wasm => unreachable,
.spirv => unreachable,
+ .nvptx => unreachable,
}
}
@@ -851,6 +869,7 @@ pub const File = struct {
wasm,
spirv,
plan9,
+ nvptx,
};
pub const ErrorFlags = struct {
@@ -864,6 +883,7 @@ pub const File = struct {
pub const MachO = @import("link/MachO.zig");
pub const SpirV = @import("link/SpirV.zig");
pub const Wasm = @import("link/Wasm.zig");
+ pub const NvPtx = @import("link/NvPtx.zig");
};
pub fn determineMode(options: Options) fs.File.Mode {
diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig
new file mode 100644
index 0000000000..77613cdc1d
--- /dev/null
+++ b/src/link/NvPtx.zig
@@ -0,0 +1,122 @@
+//! NVidia PTX (Parallel Thread Execution)
+//! https://docs.nvidia.com/cuda/parallel-thread-execution/index.html
+//! For this we rely on the nvptx backend of LLVM
+//! Kernel functions need to be marked both as "export" and "callconv(.PtxKernel)"
+
+const NvPtx = @This();
+
+const std = @import("std");
+const builtin = @import("builtin");
+
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+
+const Module = @import("../Module.zig");
+const Compilation = @import("../Compilation.zig");
+const link = @import("../link.zig");
+const trace = @import("../tracy.zig").trace;
+const build_options = @import("build_options");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
+const LlvmObject = @import("../codegen/llvm.zig").Object;
+
+base: link.File,
+llvm_object: *LlvmObject,
+
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*NvPtx {
+ if (!build_options.have_llvm) return error.TODOArchNotSupported;
+
+ const nvptx = try gpa.create(NvPtx);
+ nvptx.* = .{
+ .base = .{
+ .tag = .nvptx,
+ .options = options,
+ .file = null,
+ .allocator = gpa,
+ },
+ .llvm_object = undefined,
+ };
+
+ switch (options.target.cpu.arch) {
+ .nvptx, .nvptx64 => {},
+ else => return error.TODOArchNotSupported,
+ }
+
+ switch (options.target.os.tag) {
+ // TODO: does it also work with nvcl ?
+ .cuda => {},
+ else => return error.TODOOsNotSupported,
+ }
+
+ return nvptx;
+}
+
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*NvPtx {
+ if (!build_options.have_llvm) @panic("nvptx target requires a zig compiler with llvm enabled.");
+ if (!options.use_llvm) return error.TODOArchNotSupported;
+ assert(options.object_format == .nvptx);
+
+ const nvptx = try createEmpty(allocator, options);
+ errdefer nvptx.base.destroy();
+ log.info("Opening .ptx target file {s}", .{sub_path});
+ nvptx.llvm_object = try LlvmObject.create(allocator, options);
+ return nvptx;
+}
+
+pub fn deinit(self: *NvPtx) void {
+ if (!build_options.have_llvm) return;
+ self.llvm_object.destroy(self.base.allocator);
+}
+
+pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ if (!build_options.have_llvm) return;
+ try self.llvm_object.updateFunc(module, func, air, liveness);
+}
+
+pub fn updateDecl(self: *NvPtx, module: *Module, decl: *Module.Decl) !void {
+ if (!build_options.have_llvm) return;
+ return self.llvm_object.updateDecl(module, decl);
+}
+
+pub fn updateDeclExports(
+ self: *NvPtx,
+ module: *Module,
+ decl: *const Module.Decl,
+ exports: []const *Module.Export,
+) !void {
+ if (!build_options.have_llvm) return;
+ if (build_options.skip_non_native and builtin.object_format != .nvptx) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ return self.llvm_object.updateDeclExports(module, decl, exports);
+}
+
+pub fn freeDecl(self: *NvPtx, decl: *Module.Decl) void {
+ if (!build_options.have_llvm) return;
+ return self.llvm_object.freeDecl(decl);
+}
+
+pub fn flush(self: *NvPtx, comp: *Compilation) !void {
+ return self.flushModule(comp);
+}
+
+pub fn flushModule(self: *NvPtx, comp: *Compilation) !void {
+ if (!build_options.have_llvm) return;
+ if (build_options.skip_non_native) {
+ @panic("Attempted to compile for architecture that was disabled by build configuration");
+ }
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var hack_comp = comp;
+ if (comp.bin_file.options.emit) |emit| {
+ hack_comp.emit_asm = .{
+ .directory = emit.directory,
+ .basename = comp.bin_file.intermediary_basename.?,
+ };
+ hack_comp.bin_file.options.emit = null;
+ }
+
+ return try self.llvm_object.flushModule(hack_comp);
+}
diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp
index b3c578d95a..36f136c77f 100644
--- a/src/stage1/all_types.hpp
+++ b/src/stage1/all_types.hpp
@@ -83,7 +83,8 @@ enum CallingConvention {
CallingConventionAPCS,
CallingConventionAAPCS,
CallingConventionAAPCSVFP,
- CallingConventionSysV
+ CallingConventionSysV,
+ CallingConventionPtxKernel
};
// Stage 1 supports only the generic address space
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index dfe7452cfc..0dcf1fcc06 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -991,6 +991,7 @@ const char *calling_convention_name(CallingConvention cc) {
case CallingConventionAAPCSVFP: return "AAPCSVFP";
case CallingConventionInline: return "Inline";
case CallingConventionSysV: return "SysV";
+ case CallingConventionPtxKernel: return "PtxKernel";
}
zig_unreachable();
}
@@ -1000,6 +1001,7 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
case CallingConventionUnspecified:
case CallingConventionAsync:
case CallingConventionInline:
+ case CallingConventionPtxKernel:
return true;
case CallingConventionC:
case CallingConventionNaked:
@@ -2006,6 +2008,15 @@ Error emit_error_unless_callconv_allowed_for_target(CodeGen *g, AstNode *source_
case CallingConventionSysV:
if (g->zig_target->arch != ZigLLVM_x86_64)
allowed_platforms = "x86_64";
+ break;
+ case CallingConventionPtxKernel:
+ if (g->zig_target->arch != ZigLLVM_nvptx
+ && g->zig_target->arch != ZigLLVM_nvptx64)
+ {
+ allowed_platforms = "nvptx and nvptx64";
+ }
+ break;
+
}
if (allowed_platforms != nullptr) {
add_node_error(g, source_node, buf_sprintf(
@@ -3827,6 +3838,7 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
case CallingConventionAAPCS:
case CallingConventionAAPCSVFP:
case CallingConventionSysV:
+ case CallingConventionPtxKernel:
add_fn_export(g, fn_table_entry, buf_ptr(&fn_table_entry->symbol_name),
GlobalLinkageIdStrong, fn_cc);
break;
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 154e982ff9..4e9d6313db 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -209,6 +209,11 @@ static ZigLLVM_CallingConv get_llvm_cc(CodeGen *g, CallingConvention cc) {
case CallingConventionSysV:
assert(g->zig_target->arch == ZigLLVM_x86_64);
return ZigLLVM_X86_64_SysV;
+ case CallingConventionPtxKernel:
+ assert(g->zig_target->arch == ZigLLVM_nvptx ||
+ g->zig_target->arch == ZigLLVM_nvptx64);
+ return ZigLLVM_PTX_Kernel;
+
}
zig_unreachable();
}
@@ -354,6 +359,7 @@ static bool cc_want_sret_attr(CallingConvention cc) {
case CallingConventionAAPCS:
case CallingConventionAAPCSVFP:
case CallingConventionSysV:
+ case CallingConventionPtxKernel:
return true;
case CallingConventionAsync:
case CallingConventionUnspecified:
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index 5694db22ee..be6226313f 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -11666,6 +11666,7 @@ static Stage1AirInst *ir_analyze_instruction_export(IrAnalyze *ira, Stage1ZirIns
case CallingConventionAAPCS:
case CallingConventionAAPCSVFP:
case CallingConventionSysV:
+ case CallingConventionPtxKernel:
add_fn_export(ira->codegen, fn_entry, buf_ptr(symbol_name), global_linkage_id, cc);
fn_entry->section_name = section_name;
break;
From f2a82bafae414e9b454b77f6d0bcf768cfa9089b Mon Sep 17 00:00:00 2001
From: praschke
Date: Sat, 27 Nov 2021 16:40:26 +0000
Subject: [PATCH 0040/2031] std: allow tests to use cache and setOutputDir
---
lib/std/build.zig | 50 ++++++++++++++++++++++-------------------------
1 file changed, 23 insertions(+), 27 deletions(-)
diff --git a/lib/std/build.zig b/lib/std/build.zig
index 104aceea76..ca1c708583 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -2862,40 +2862,36 @@ pub const LibExeObjStep = struct {
});
}
- if (self.kind == .@"test") {
- _ = try builder.execFromStep(zig_args.items, step);
- } else {
- try zig_args.append("--enable-cache");
+ try zig_args.append("--enable-cache");
- const output_dir_nl = try builder.execFromStep(zig_args.items, &self.step);
- const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n");
+ const output_dir_nl = try builder.execFromStep(zig_args.items, &self.step);
+ const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n");
- if (self.output_dir) |output_dir| {
- var src_dir = try std.fs.cwd().openDir(build_output_dir, .{ .iterate = true });
- defer src_dir.close();
+ if (self.output_dir) |output_dir| {
+ var src_dir = try std.fs.cwd().openDir(build_output_dir, .{ .iterate = true });
+ defer src_dir.close();
- // Create the output directory if it doesn't exist.
- try std.fs.cwd().makePath(output_dir);
+ // Create the output directory if it doesn't exist.
+ try std.fs.cwd().makePath(output_dir);
- var dest_dir = try std.fs.cwd().openDir(output_dir, .{});
- defer dest_dir.close();
+ var dest_dir = try std.fs.cwd().openDir(output_dir, .{});
+ defer dest_dir.close();
- var it = src_dir.iterate();
- while (try it.next()) |entry| {
- // The compiler can put these files into the same directory, but we don't
- // want to copy them over.
- if (mem.eql(u8, entry.name, "stage1.id") or
- mem.eql(u8, entry.name, "llvm-ar.id") or
- mem.eql(u8, entry.name, "libs.txt") or
- mem.eql(u8, entry.name, "builtin.zig") or
- mem.eql(u8, entry.name, "zld.id") or
- mem.eql(u8, entry.name, "lld.id")) continue;
+ var it = src_dir.iterate();
+ while (try it.next()) |entry| {
+ // The compiler can put these files into the same directory, but we don't
+ // want to copy them over.
+ if (mem.eql(u8, entry.name, "stage1.id") or
+ mem.eql(u8, entry.name, "llvm-ar.id") or
+ mem.eql(u8, entry.name, "libs.txt") or
+ mem.eql(u8, entry.name, "builtin.zig") or
+ mem.eql(u8, entry.name, "zld.id") or
+ mem.eql(u8, entry.name, "lld.id")) continue;
- _ = try src_dir.updateFile(entry.name, dest_dir, entry.name, .{});
- }
- } else {
- self.output_dir = build_output_dir;
+ _ = try src_dir.updateFile(entry.name, dest_dir, entry.name, .{});
}
+ } else {
+ self.output_dir = build_output_dir;
}
// This will ensure all output filenames will now have the output_dir available!
From d4c3475f3dce8ecf81ba2baaf97b6d25426919ec Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 5 Feb 2022 12:44:32 +0100
Subject: [PATCH 0041/2031] stage2 ARM: clarify usage of unfreezeRegs in
airSliceElemVal
---
src/arch/arm/CodeGen.zig | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 3d334656a1..3ab395f069 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1247,7 +1247,6 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
};
self.register_manager.freezeRegs(&.{base_mcv.register});
- defer self.register_manager.unfreezeRegs(&.{base_mcv.register});
switch (elem_size) {
1, 4 => {
@@ -1283,6 +1282,8 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
} },
});
+ self.register_manager.unfreezeRegs(&.{base_mcv.register});
+
break :result dst_mcv;
},
else => {
@@ -1291,7 +1292,6 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const offset_mcv = try self.genArmMulConstant(bin_op.rhs, @intCast(u32, elem_size));
assert(offset_mcv == .register); // result of multiplication should always be register
self.register_manager.freezeRegs(&.{offset_mcv.register});
- defer self.register_manager.unfreezeRegs(&.{offset_mcv.register});
const addr_reg = try self.register_manager.allocReg(null);
self.register_manager.freezeRegs(&.{addr_reg});
@@ -1299,11 +1299,9 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
try self.genArmBinOpCode(addr_reg, base_mcv, offset_mcv, false, .add, .unsigned);
- // I know we will unfreeze these registers at the end of
- // the scope of :result. However, at this point in time,
- // neither the base register nor the offset register
- // contains any valuable data anymore. In order to reduce
- // register pressure, unfreeze them prematurely
+ // At this point in time, neither the base register
+ // nor the offset register contains any valuable data
+ // anymore.
self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
From 4b3b487627d71fa082b0316383344b49c95bab0e Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 5 Feb 2022 17:13:11 +0100
Subject: [PATCH 0042/2031] stage2 regalloc: Introduce error.OutOfRegisters
---
src/arch/aarch64/CodeGen.zig | 7 +++
src/arch/arm/CodeGen.zig | 7 +++
src/arch/riscv64/CodeGen.zig | 7 +++
src/arch/x86_64/CodeGen.zig | 7 +++
src/register_manager.zig | 101 +++++++++++++++++++++++++----------
5 files changed, 100 insertions(+), 29 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 6202d2e74f..2b8c5e62d4 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -31,6 +31,7 @@ const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const InnerError = error{
OutOfMemory,
CodegenFail,
+ OutOfRegisters,
};
gpa: Allocator,
@@ -274,6 +275,9 @@ pub fn generate(
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -285,6 +289,9 @@ pub fn generate(
function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 3ab395f069..c87f750831 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -31,6 +31,7 @@ const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const InnerError = error{
OutOfMemory,
CodegenFail,
+ OutOfRegisters,
};
gpa: Allocator,
@@ -279,6 +280,9 @@ pub fn generate(
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -290,6 +294,9 @@ pub fn generate(
function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 9e850bd751..612ff78bd6 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -31,6 +31,7 @@ const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const InnerError = error{
OutOfMemory,
CodegenFail,
+ OutOfRegisters,
};
gpa: Allocator,
@@ -280,6 +281,9 @@ pub fn generate(
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -291,6 +295,9 @@ pub fn generate(
function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index b3a292a7f4..e464dd3cf9 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -31,6 +31,7 @@ const Zir = @import("../../Zir.zig");
const InnerError = error{
OutOfMemory,
CodegenFail,
+ OutOfRegisters,
};
const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs);
@@ -308,6 +309,9 @@ pub fn generate(
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -319,6 +323,9 @@ pub fn generate(
function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 63a0efbad8..81c9fa5734 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -12,6 +12,17 @@ const expectEqualSlices = std.testing.expectEqualSlices;
const log = std.log.scoped(.register_manager);
+pub const AllocateRegistersError = error{
+ /// No registers are available anymore
+ OutOfRegisters,
+ /// Can happen when spilling an instruction in codegen runs out of
+ /// memory, so we propagate that error
+ OutOfMemory,
+ /// Can happen when spilling an instruction triggers a codegen
+ /// error, so we propagate that error
+ CodegenFail,
+};
+
pub fn RegisterManager(
comptime Function: type,
comptime Register: type,
@@ -168,8 +179,9 @@ pub fn RegisterManager(
self: *Self,
comptime count: comptime_int,
insts: [count]?Air.Inst.Index,
- ) ![count]Register {
+ ) AllocateRegistersError![count]Register {
comptime assert(count > 0 and count <= callee_preserved_regs.len);
+ if (count > callee_preserved_regs.len - @popCount(FreeRegInt, self.frozen_registers)) return error.OutOfRegisters;
const result = self.tryAllocRegs(count, insts) orelse blk: {
// We'll take over the first count registers. Spill
@@ -214,14 +226,14 @@ pub fn RegisterManager(
/// Allocates a register and optionally tracks it with a
/// corresponding instruction.
- pub fn allocReg(self: *Self, inst: ?Air.Inst.Index) !Register {
+ pub fn allocReg(self: *Self, inst: ?Air.Inst.Index) AllocateRegistersError!Register {
return (try self.allocRegs(1, .{inst}))[0];
}
/// Spills the register if it is currently allocated. If a
/// corresponding instruction is passed, will also track this
/// register.
- pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) !void {
+ pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void {
const index = reg.allocIndex() orelse return;
self.markRegAllocated(reg);
@@ -317,6 +329,13 @@ fn MockFunction(comptime Register: type) type {
_ = inst;
try self.spilled.append(self.allocator, reg);
}
+
+ pub fn genAdd(self: *Self, res: Register, lhs: Register, rhs: Register) !void {
+ _ = self;
+ _ = res;
+ _ = lhs;
+ _ = rhs;
+ }
};
}
@@ -431,7 +450,9 @@ test "tryAllocRegs" {
try expect(function.register_manager.isRegAllocated(.r3));
}
-test "allocRegs" {
+test "allocRegs: normal usage" {
+ // TODO: convert this into a decltest once that is supported
+
const allocator = std.testing.allocator;
var function = MockFunction2{
@@ -439,35 +460,57 @@ test "allocRegs" {
};
defer function.deinit();
- const mock_instruction: Air.Inst.Index = 1;
-
- try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, try function.register_manager.allocRegs(3, .{
- mock_instruction,
- mock_instruction,
- mock_instruction,
- }));
-
- try expect(function.register_manager.isRegAllocated(.r0));
- try expect(function.register_manager.isRegAllocated(.r1));
- try expect(function.register_manager.isRegAllocated(.r2));
- try expect(!function.register_manager.isRegAllocated(.r3));
-
- // Frozen registers
- function.register_manager.freeReg(.r0);
- function.register_manager.freeReg(.r2);
- function.register_manager.freeReg(.r3);
{
- function.register_manager.freezeRegs(&.{.r1});
- defer function.register_manager.unfreezeRegs(&.{.r1});
+ const result_reg: MockRegister2 = .r1;
- try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, try function.register_manager.allocRegs(3, .{ null, null, null }));
+ // The result register is known and fixed at this point, we
+ // don't want to accidentally allocate lhs or rhs to the
+ // result register, this is why we freeze it.
+ //
+ // Using defer unfreeze right after freeze is a good idea in
+ // most cases as you probably are using the frozen registers
+ // in the remainder of this scope and don't need to use it
+ // after the end of this scope. However, in some situations,
+ // it may make sense to manually unfreeze registers before the
+ // end of the scope when you are certain that they don't
+ // contain any valuable data anymore and can be reused. For an
+ // example of that, see `selectively reducing register
+ // pressure`.
+ function.register_manager.freezeRegs(&.{result_reg});
+ defer function.register_manager.unfreezeRegs(&.{result_reg});
+
+ const regs = try function.register_manager.allocRegs(2, .{ null, null });
+ try function.genAdd(result_reg, regs[0], regs[1]);
}
- try expect(!function.register_manager.frozenRegsExist());
+}
- try expect(function.register_manager.isRegAllocated(.r0));
- try expect(function.register_manager.isRegAllocated(.r1));
- try expect(function.register_manager.isRegAllocated(.r2));
- try expect(function.register_manager.isRegAllocated(.r3));
+test "allocRegs: selectively reducing register pressure" {
+ // TODO: convert this into a decltest once that is supported
+
+ const allocator = std.testing.allocator;
+
+ var function = MockFunction2{
+ .allocator = allocator,
+ };
+ defer function.deinit();
+
+ {
+ const result_reg: MockRegister2 = .r1;
+
+ function.register_manager.freezeRegs(&.{result_reg});
+ defer function.register_manager.unfreezeRegs(&.{result_reg});
+
+ // Here, we don't defer unfreeze because we manually unfreeze
+ // after genAdd
+ const regs = try function.register_manager.allocRegs(2, .{ null, null });
+ function.register_manager.freezeRegs(&.{result_reg});
+
+ try function.genAdd(result_reg, regs[0], regs[1]);
+ function.register_manager.unfreezeRegs(®s);
+
+ const extra_summand_reg = try function.register_manager.allocReg(null);
+ try function.genAdd(result_reg, result_reg, extra_summand_reg);
+ }
}
test "getReg" {
From f132f426b9a11774323eafc669da0c9731deb85b Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 5 Feb 2022 19:09:20 +0100
Subject: [PATCH 0043/2031] x86_64: add distinct MCValue representing symbol
index in the linker
For PIE targets, we defer getting an address of value until the linker
has allocated all atoms and performed the relocations. In codegen,
we represent this via `MCValue.linker_sym_index` value.
---
src/arch/x86_64/CodeGen.zig | 123 ++++++++++++++++++++++--------------
1 file changed, 76 insertions(+), 47 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e464dd3cf9..e5c7c99501 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -118,6 +118,10 @@ pub const MCValue = union(enum) {
/// The value is in memory at a hard-coded address.
/// If the type is a pointer, it means the pointer address is at this memory location.
memory: u64,
+ /// The value is in memory but not allocated an address yet by the linker, so we store
+ /// the symbol index instead.
+ /// If the type is a pointer, it means the pointer is the symbol.
+ linker_sym_index: u32,
/// The value is one of the stack variables.
/// If the type is a pointer, it means the pointer address is in the stack at this offset.
stack_offset: i32,
@@ -1686,8 +1690,10 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}),
}
},
- .memory => |addr| {
- const reg = try self.copyToTmpRegister(ptr_ty, .{ .memory = addr });
+ .memory,
+ .linker_sym_index,
+ => {
+ const reg = try self.copyToTmpRegister(ptr_ty, ptr);
try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
},
.stack_offset => {
@@ -1817,27 +1823,33 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
},
}
},
- .memory => |addr| {
+ .linker_sym_index,
+ .memory,
+ => {
value.freezeIfRegister(&self.register_manager);
defer value.unfreezeIfRegister(&self.register_manager);
const addr_reg: Register = blk: {
- if (self.bin_file.options.pie) {
- const addr_reg = try self.register_manager.allocReg(null);
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = (Mir.Ops{
- .reg1 = addr_reg.to64(),
- .flags = 0b10,
- }).encode(),
- .data = .{ .got_entry = @truncate(u32, addr) },
- });
- break :blk addr_reg;
- } else {
- // TODO: in case the address fits in an imm32 we can use [ds:imm32]
- // instead of wasting an instruction copying the address to a register
- const addr_reg = try self.copyToTmpRegister(ptr_ty, .{ .immediate = addr });
- break :blk addr_reg;
+ switch (ptr) {
+ .linker_sym_index => |sym_index| {
+ const addr_reg = try self.register_manager.allocReg(null);
+ _ = try self.addInst(.{
+ .tag = .lea,
+ .ops = (Mir.Ops{
+ .reg1 = addr_reg.to64(),
+ .flags = 0b10,
+ }).encode(),
+ .data = .{ .got_entry = sym_index },
+ });
+ break :blk addr_reg;
+ },
+ .memory => |addr| {
+ // TODO: in case the address fits in an imm32 we can use [ds:imm32]
+ // instead of wasting an instruction copying the address to a register
+ const addr_reg = try self.copyToTmpRegister(ptr_ty, .{ .immediate = addr });
+ break :blk addr_reg;
+ },
+ else => unreachable,
}
};
@@ -2148,6 +2160,9 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
.embedded_in_code, .memory => {
return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
},
+ .linker_sym_index => {
+ return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
+ },
.stack_offset => |off| {
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
@@ -2232,6 +2247,9 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
.embedded_in_code, .memory, .stack_offset => {
return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
},
+ .linker_sym_index => {
+ return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
+ },
.compare_flags_unsigned => {
return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
},
@@ -2243,6 +2261,9 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
.embedded_in_code, .memory => {
return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{});
},
+ .linker_sym_index => {
+ return self.fail("TODO implement x86 ADD/SUB/CMP destination symbol at index", .{});
+ },
}
}
@@ -2296,6 +2317,9 @@ fn genIMulOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !
.embedded_in_code, .memory, .stack_offset => {
return self.fail("TODO implement x86 multiply source memory", .{});
},
+ .linker_sym_index => {
+ return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
+ },
.compare_flags_unsigned => {
return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
},
@@ -2334,6 +2358,9 @@ fn genIMulOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !
.embedded_in_code, .memory, .stack_offset => {
return self.fail("TODO implement x86 multiply source memory", .{});
},
+ .linker_sym_index => {
+ return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
+ },
.compare_flags_unsigned => {
return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
},
@@ -2345,6 +2372,9 @@ fn genIMulOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !
.embedded_in_code, .memory => {
return self.fail("TODO implement x86 multiply destination memory", .{});
},
+ .linker_sym_index => {
+ return self.fail("TODO implement x86 multiply destination symbol at index in linker", .{});
+ },
}
}
@@ -2448,6 +2478,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
.dead => unreachable,
.embedded_in_code => unreachable,
.memory => unreachable,
+ .linker_sym_index => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
}
@@ -2508,10 +2539,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
try self.genSetReg(Type.initTag(.usize), .rax, .{
- .memory = func.owner_decl.link.macho.local_sym_index,
+ .linker_sym_index = func.owner_decl.link.macho.local_sym_index,
});
// callq *%rax
_ = try self.addInst(.{
@@ -3547,6 +3576,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
},
.memory,
.embedded_in_code,
+ .linker_sym_index,
=> {
if (ty.abiSize(self.target.*) <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
@@ -3952,29 +3982,28 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = undefined,
});
},
+ .linker_sym_index => |sym_index| {
+ _ = try self.addInst(.{
+ .tag = .lea,
+ .ops = (Mir.Ops{
+ .reg1 = reg,
+ .flags = 0b10,
+ }).encode(),
+ .data = .{ .got_entry = sym_index },
+ });
+ // MOV reg, [reg]
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .ops = (Mir.Ops{
+ .reg1 = reg,
+ .reg2 = reg,
+ .flags = 0b01,
+ }).encode(),
+ .data = .{ .imm = 0 },
+ });
+ },
.memory => |x| {
- // TODO can we move this entire logic into Emit.zig like with aarch64?
- if (self.bin_file.options.pie) {
- // TODO we should flag up `x` as GOT symbol entry explicitly rather than as a hack.
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = (Mir.Ops{
- .reg1 = reg,
- .flags = 0b10,
- }).encode(),
- .data = .{ .got_entry = @truncate(u32, x) },
- });
- // MOV reg, [reg]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = (Mir.Ops{
- .reg1 = reg,
- .reg2 = reg,
- .flags = 0b01,
- }).encode(),
- .data = .{ .imm = 0 },
- });
- } else if (x <= math.maxInt(i32)) {
+ if (x <= math.maxInt(i32)) {
// mov reg, [ds:imm32]
_ = try self.addInst(.{
.tag = .mov,
@@ -4285,9 +4314,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
- return MCValue{ .memory = decl.link.macho.local_sym_index };
+ // Because MachO is PIE-always-on, we defer memory address resolution until
+ // the linker has enough info to perform relocations.
+ return MCValue{ .linker_sym_index = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
From 6f87f49f3ddf4fb56f8b4e2ff86d1020ab2dd690 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20L=C3=B6thberg?=
Date: Sun, 6 Feb 2022 03:37:55 +0100
Subject: [PATCH 0044/2031] CLI: remove remainders of --verbose-ast and
--verbose-tokenize
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
These options were removed in 5e63baae8 (CLI: remove --verbose-ast and
--verbose-tokenize, 2021-06-09) but some remainders were left in.
Signed-off-by: Johannes Löthberg
---
lib/std/build.zig | 6 ------
lib/std/special/build_runner.zig | 6 ------
src/stage1/zig0.cpp | 2 --
3 files changed, 14 deletions(-)
diff --git a/lib/std/build.zig b/lib/std/build.zig
index ca1c708583..05f36f5714 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -34,8 +34,6 @@ pub const Builder = struct {
available_options_map: AvailableOptionsMap,
available_options_list: ArrayList(AvailableOption),
verbose: bool,
- verbose_tokenize: bool,
- verbose_ast: bool,
verbose_link: bool,
verbose_cc: bool,
verbose_air: bool,
@@ -172,8 +170,6 @@ pub const Builder = struct {
.cache_root = try fs.path.relative(allocator, build_root, cache_root),
.global_cache_root = global_cache_root,
.verbose = false,
- .verbose_tokenize = false,
- .verbose_ast = false,
.verbose_link = false,
.verbose_cc = false,
.verbose_air = false,
@@ -2405,8 +2401,6 @@ pub const LibExeObjStep = struct {
try zig_args.append(log_scope);
}
- if (builder.verbose_tokenize) zig_args.append("--verbose-tokenize") catch unreachable;
- if (builder.verbose_ast) zig_args.append("--verbose-ast") catch unreachable;
if (builder.verbose_cimport) zig_args.append("--verbose-cimport") catch unreachable;
if (builder.verbose_air) zig_args.append("--verbose-air") catch unreachable;
if (builder.verbose_llvm_ir) zig_args.append("--verbose-llvm-ir") catch unreachable;
diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig
index 2a64861cf9..eb83ef8fcd 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/std/special/build_runner.zig
@@ -147,10 +147,6 @@ pub fn main() !void {
std.debug.print("Expected argument after --glibc-runtimes\n\n", .{});
return usageAndErr(builder, false, stderr_stream);
};
- } else if (mem.eql(u8, arg, "--verbose-tokenize")) {
- builder.verbose_tokenize = true;
- } else if (mem.eql(u8, arg, "--verbose-ast")) {
- builder.verbose_ast = true;
} else if (mem.eql(u8, arg, "--verbose-link")) {
builder.verbose_link = true;
} else if (mem.eql(u8, arg, "--verbose-air")) {
@@ -310,8 +306,6 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
\\ --cache-dir [path] Override path to zig cache directory
\\ --zig-lib-dir [arg] Override path to Zig lib directory
\\ --debug-log [scope] Enable debugging the compiler
- \\ --verbose-tokenize Enable compiler debug output for tokenization
- \\ --verbose-ast Enable compiler debug output for parsing into an AST
\\ --verbose-link Enable compiler debug output for linking
\\ --verbose-air Enable compiler debug output for Zig AIR
\\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
diff --git a/src/stage1/zig0.cpp b/src/stage1/zig0.cpp
index 439f2bd1d4..f675fa1061 100644
--- a/src/stage1/zig0.cpp
+++ b/src/stage1/zig0.cpp
@@ -51,8 +51,6 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) {
" --strip exclude debug symbols\n"
" -target [name] -- see the targets command\n"
" -mcpu [cpu] specify target CPU and feature set\n"
- " --verbose-tokenize enable compiler debug output for tokenization\n"
- " --verbose-ast enable compiler debug output for AST parsing\n"
" --verbose-ir enable compiler debug output for Zig IR\n"
" --verbose-llvm-ir enable compiler debug output for LLVM IR\n"
" --verbose-cimport enable compiler debug output for C imports\n"
From 4468abfc424ba645413ee076e5e2e370aa807bcc Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 5 Feb 2022 23:17:34 +0100
Subject: [PATCH 0045/2031] stage2 ARM: enable a handful of passing behavior
tests
---
test/behavior/align.zig | 16 ----------------
test/behavior/alignof.zig | 1 -
test/behavior/bitcast.zig | 6 ------
test/behavior/bugs/1025.zig | 1 -
test/behavior/bugs/1500.zig | 1 -
test/behavior/generics.zig | 2 --
.../namespace_depends_on_compile_var.zig | 1 -
7 files changed, 28 deletions(-)
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 40b0304371..7871dec24b 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -22,14 +22,10 @@ test "global variable alignment" {
}
test "default alignment allows unspecified in type syntax" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try expect(*u32 == *align(@alignOf(u32)) u32);
}
test "implicitly decreasing pointer alignment" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
const a: u32 align(4) = 3;
const b: u32 align(8) = 4;
try expect(addUnaligned(&a, &b) == 7);
@@ -40,8 +36,6 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
}
test "@alignCast pointers" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
var x: u32 align(4) = 1;
expectsOnly1(&x);
try expect(x == 2);
@@ -54,8 +48,6 @@ fn expects4(x: *align(4) u32) void {
}
test "alignment of structs" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try expect(@alignOf(struct {
a: i32,
b: *i32,
@@ -63,15 +55,11 @@ test "alignment of structs" {
}
test "alignment of >= 128-bit integer type" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try expect(@alignOf(u128) == 16);
try expect(@alignOf(u129) == 16);
}
test "alignment of struct with 128-bit field" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try expect(@alignOf(struct {
x: u128,
}) == 16);
@@ -84,8 +72,6 @@ test "alignment of struct with 128-bit field" {
}
test "size of extern struct with 128-bit field" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try expect(@sizeOf(extern struct {
x: u128,
y: u8,
@@ -100,8 +86,6 @@ test "size of extern struct with 128-bit field" {
}
test "@ptrCast preserves alignment of bigger source" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
var x: u32 align(16) = 1234;
const ptr = @ptrCast(*u8, &x);
try expect(@TypeOf(ptr) == *align(16) u8);
diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig
index 54e09877e1..749855db52 100644
--- a/test/behavior/alignof.zig
+++ b/test/behavior/alignof.zig
@@ -22,7 +22,6 @@ test "@alignOf(T) before referencing T" {
test "comparison of @alignOf(T) against zero" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
{
const T = struct { x: u32 };
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index a4a555057e..d56e3c1c53 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -76,7 +76,6 @@ test "@bitCast packed structs at runtime and comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Full = packed struct {
number: u16,
@@ -113,7 +112,6 @@ test "@bitCast extern structs at runtime and comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Full = extern struct {
number: u16,
@@ -147,7 +145,6 @@ test "bitcast packed struct to integer and back" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const LevelUpMove = packed struct {
move_id: u9,
@@ -184,7 +181,6 @@ test "bitcast packed struct literal to byte" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Foo = packed struct {
value: u8,
@@ -198,7 +194,6 @@ test "comptime bitcast used in expression has the correct type" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Foo = packed struct {
value: u8,
@@ -211,7 +206,6 @@ test "bitcast passed as tuple element" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn foo(args: anytype) !void {
diff --git a/test/behavior/bugs/1025.zig b/test/behavior/bugs/1025.zig
index fa72e522de..33ceb9fedf 100644
--- a/test/behavior/bugs/1025.zig
+++ b/test/behavior/bugs/1025.zig
@@ -10,7 +10,6 @@ fn getA() A {
test "bug 1025" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a = getA();
try @import("std").testing.expect(a.B == u8);
}
diff --git a/test/behavior/bugs/1500.zig b/test/behavior/bugs/1500.zig
index d224bfcc4a..eb2a06b7fb 100644
--- a/test/behavior/bugs/1500.zig
+++ b/test/behavior/bugs/1500.zig
@@ -7,7 +7,6 @@ const B = *const fn (A) void;
test "allow these dependencies" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var a: A = undefined;
var b: B = undefined;
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index 72f5fd9594..1942e82340 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -166,7 +166,6 @@ test "generic fn keeps non-generic parameter types" {
test "array of generic fns" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(foos[0](true));
try expect(!foos[1](true));
@@ -186,7 +185,6 @@ fn foo2(arg: anytype) bool {
test "generic struct" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var a1 = GenNode(i32){
.value = 13,
diff --git a/test/behavior/namespace_depends_on_compile_var.zig b/test/behavior/namespace_depends_on_compile_var.zig
index 6b79df1a81..db1dfaf308 100644
--- a/test/behavior/namespace_depends_on_compile_var.zig
+++ b/test/behavior/namespace_depends_on_compile_var.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "namespace depends on compile var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (some_namespace.a_bool) {
try expect(some_namespace.a_bool);
From 556f0ce5bfb962764bb68d9d9c16765cf9e1b9ba Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 5 Feb 2022 17:41:03 +0100
Subject: [PATCH 0046/2031] stage2: add new Decl subtype, ExternFn
`ExternFn` will contain a maybe-lib-name if it was defined with
the `extern` keyword like so
```zig
extern "c" fn write(usize, usize, usize) usize;
```
`lib_name` will live as long as `ExternFn` decl does.
---
src/Module.zig | 36 ++++++++++++++++++++++++++++++++----
src/Sema.zig | 22 ++++++++++++++++++----
src/arch/aarch64/CodeGen.zig | 11 +++++++++--
src/arch/wasm/CodeGen.zig | 6 +++---
src/arch/x86_64/CodeGen.zig | 11 +++++++++--
src/codegen/c.zig | 8 ++++----
src/codegen/llvm.zig | 4 ++--
src/value.zig | 19 +++++++++++++------
8 files changed, 90 insertions(+), 27 deletions(-)
diff --git a/src/Module.zig b/src/Module.zig
index e2e2505927..4fd3ee5c01 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -501,6 +501,10 @@ pub const Decl = struct {
}
pub fn clearValues(decl: *Decl, gpa: Allocator) void {
+ if (decl.getExternFn()) |extern_fn| {
+ extern_fn.deinit(gpa);
+ gpa.destroy(extern_fn);
+ }
if (decl.getFunction()) |func| {
func.deinit(gpa);
gpa.destroy(func);
@@ -690,6 +694,13 @@ pub const Decl = struct {
return func;
}
+ pub fn getExternFn(decl: *const Decl) ?*ExternFn {
+ if (!decl.owns_tv) return null;
+ const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data;
+ assert(extern_fn.owner_decl == decl);
+ return extern_fn;
+ }
+
pub fn getVariable(decl: *Decl) ?*Var {
if (!decl.owns_tv) return null;
const variable = (decl.val.castTag(.variable) orelse return null).data;
@@ -1320,9 +1331,26 @@ pub const Opaque = struct {
}
};
+/// Some extern function struct memory is owned by the Decl's TypedValue.Managed
+/// arena allocator.
+pub const ExternFn = struct {
+ /// The Decl that corresponds to the function itself.
+ owner_decl: *Decl,
+ /// Library name if specified.
+ /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
+ /// Allocated with Module's allocator; outlives the ZIR code.
+ lib_name: ?[*:0]const u8,
+
+ pub fn deinit(extern_fn: *ExternFn, gpa: Allocator) void {
+ if (extern_fn.lib_name) |lib_name| {
+ gpa.free(mem.sliceTo(lib_name, 0));
+ }
+ }
+};
+
/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
-/// Extern functions do not have this data structure; they are represented by
-/// the `Decl` only, with a `Value` tag of `extern_fn`.
+/// Extern functions do not have this data structure; they are represented by `ExternFn`
+/// instead.
pub const Fn = struct {
/// The Decl that corresponds to the function itself.
owner_decl: *Decl,
@@ -3768,8 +3796,8 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
},
.extern_fn => {
- const owner_decl = decl_tv.val.castTag(.extern_fn).?.data;
- if (decl == owner_decl) {
+ const extern_fn = decl_tv.val.castTag(.extern_fn).?.data;
+ if (extern_fn.owner_decl == decl) {
decl.owns_tv = true;
queue_linker_work = true;
is_extern = true;
diff --git a/src/Sema.zig b/src/Sema.zig
index c4b3ad8c33..48d7545a75 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5614,10 +5614,24 @@ fn funcCommon(
}
if (is_extern) {
- return sema.addConstant(
- fn_ty,
- try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl),
- );
+ const new_extern_fn = try sema.gpa.create(Module.ExternFn);
+ errdefer sema.gpa.destroy(new_extern_fn);
+
+ const lib_name: ?[*:0]const u8 = if (opt_lib_name) |lib_name| blk: {
+ break :blk try sema.gpa.dupeZ(u8, lib_name);
+ } else null;
+
+ new_extern_fn.* = Module.ExternFn{
+ .owner_decl = sema.owner_decl,
+ .lib_name = lib_name,
+ };
+
+ const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn);
+ extern_fn_payload.* = .{
+ .base = .{ .tag = .extern_fn },
+ .data = new_extern_fn,
+ };
+ return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base));
}
if (!has_body) {
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 6202d2e74f..0292497df7 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1574,8 +1574,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
- const decl = func_payload.data;
- const n_strx = try macho_file.addExternFn(mem.sliceTo(decl.name, 0));
+ const extern_fn = func_payload.data;
+ const decl_name = extern_fn.owner_decl.name;
+ if (extern_fn.lib_name) |lib_name| {
+ log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
+ decl_name,
+ lib_name,
+ });
+ }
+ const n_strx = try macho_file.addExternFn(mem.sliceTo(decl_name, 0));
_ = try self.addInst(.{
.tag = .call_extern,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 67aa9a6c88..8e0ffac76b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -952,7 +952,7 @@ pub const DeclGen = struct {
_ = func_payload;
return self.fail("TODO wasm backend genDecl function pointer", .{});
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
- const ext_decl = extern_fn.data;
+ const ext_decl = extern_fn.data.owner_decl;
var func_type = try genFunctype(self.gpa, ext_decl.ty, self.target());
func_type.deinit(self.gpa);
ext_decl.fn_link.wasm.type_index = try self.bin_file.putOrGetFuncType(func_type);
@@ -978,7 +978,7 @@ pub const DeclGen = struct {
switch (ty.zigTypeTag()) {
.Fn => {
const fn_decl = switch (val.tag()) {
- .extern_fn => val.castTag(.extern_fn).?.data,
+ .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
.function => val.castTag(.function).?.data.owner_decl,
else => unreachable,
};
@@ -1776,7 +1776,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (func_val.castTag(.function)) |func| {
break :blk func.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |ext_fn| {
- break :blk ext_fn.data;
+ break :blk ext_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
break :blk decl_ref.data;
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index b3a292a7f4..1104a94112 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2516,8 +2516,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
.data = undefined,
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
- const decl = func_payload.data;
- const n_strx = try macho_file.addExternFn(mem.sliceTo(decl.name, 0));
+ const extern_fn = func_payload.data;
+ const decl_name = extern_fn.owner_decl.name;
+ if (extern_fn.lib_name) |lib_name| {
+ log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
+ decl_name,
+ lib_name,
+ });
+ }
+ const n_strx = try macho_file.addExternFn(mem.sliceTo(decl_name, 0));
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 299581cd48..44b904f031 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -542,8 +542,8 @@ pub const DeclGen = struct {
try dg.renderDeclName(func.owner_decl, writer);
},
.extern_fn => {
- const decl = val.castTag(.extern_fn).?.data;
- try dg.renderDeclName(decl, writer);
+ const extern_fn = val.castTag(.extern_fn).?.data;
+ try dg.renderDeclName(extern_fn.owner_decl, writer);
},
.int_u64, .one => {
try writer.writeAll("((");
@@ -681,7 +681,7 @@ pub const DeclGen = struct {
return dg.renderDeclValue(writer, ty, val, decl);
},
.extern_fn => {
- const decl = val.castTag(.extern_fn).?.data;
+ const decl = val.castTag(.extern_fn).?.data.owner_decl;
return dg.renderDeclValue(writer, ty, val, decl);
},
else => unreachable,
@@ -2442,7 +2442,7 @@ fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
const fn_decl = fn_decl: {
const callee_val = f.air.value(pl_op.operand) orelse break :known;
break :fn_decl switch (callee_val.tag()) {
- .extern_fn => callee_val.castTag(.extern_fn).?.data,
+ .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl,
.function => callee_val.castTag(.function).?.data.owner_decl,
.decl_ref => callee_val.castTag(.decl_ref).?.data,
else => break :known,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 81742d4866..e9457be783 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -622,7 +622,7 @@ pub const DeclGen = struct {
_ = func_payload;
@panic("TODO llvm backend genDecl function pointer");
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
- _ = try dg.resolveLlvmFunction(extern_fn.data);
+ _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl);
} else {
const target = dg.module.getTarget();
const global = try dg.resolveGlobalDecl(decl);
@@ -1410,7 +1410,7 @@ pub const DeclGen = struct {
},
.Fn => {
const fn_decl = switch (tv.val.tag()) {
- .extern_fn => tv.val.castTag(.extern_fn).?.data,
+ .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl,
.function => tv.val.castTag(.function).?.data.owner_decl,
else => unreachable,
};
diff --git a/src/value.zig b/src/value.zig
index e444e2daf1..38cba74b0b 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -262,9 +262,9 @@ pub const Value = extern union {
.int_big_negative,
=> Payload.BigInt,
- .extern_fn,
- .decl_ref,
- => Payload.Decl,
+ .extern_fn => Payload.ExternFn,
+
+ .decl_ref => Payload.Decl,
.repeated,
.eu_payload,
@@ -475,7 +475,7 @@ pub const Value = extern union {
return Value{ .ptr_otherwise = &new_payload.base };
},
.function => return self.copyPayloadShallow(arena, Payload.Function),
- .extern_fn => return self.copyPayloadShallow(arena, Payload.Decl),
+ .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
.variable => return self.copyPayloadShallow(arena, Payload.Variable),
.decl_ref => return self.copyPayloadShallow(arena, Payload.Decl),
.decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut),
@@ -1803,9 +1803,10 @@ pub const Value = extern union {
pub fn pointerDecl(val: Value) ?*Module.Decl {
return switch (val.tag()) {
.decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl,
- .extern_fn, .decl_ref => val.cast(Payload.Decl).?.data,
+ .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
.function => val.castTag(.function).?.data.owner_decl,
.variable => val.castTag(.variable).?.data.owner_decl,
+ .decl_ref => val.cast(Payload.Decl).?.data,
else => null,
};
}
@@ -1872,9 +1873,10 @@ pub const Value = extern union {
pub fn markReferencedDeclsAlive(val: Value) void {
switch (val.tag()) {
.decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.markAlive(),
- .extern_fn, .decl_ref => return val.cast(Payload.Decl).?.data.markAlive(),
+ .extern_fn => return val.castTag(.extern_fn).?.data.owner_decl.markAlive(),
.function => return val.castTag(.function).?.data.owner_decl.markAlive(),
.variable => return val.castTag(.variable).?.data.owner_decl.markAlive(),
+ .decl_ref => return val.cast(Payload.Decl).?.data.markAlive(),
.repeated,
.eu_payload,
@@ -3148,6 +3150,11 @@ pub const Value = extern union {
data: *Module.Fn,
};
+ pub const ExternFn = struct {
+ base: Payload,
+ data: *Module.ExternFn,
+ };
+
pub const Decl = struct {
base: Payload,
data: *Module.Decl,
From db9500a31401c65327a4fd556f50d74ce75fb858 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sun, 6 Feb 2022 09:14:15 +0100
Subject: [PATCH 0047/2031] stage2: handle extern lib name annotation for vars
For example, a situation like this is allowed
```zig
extern "c" var stderrp: c_int;
```
In this case, `Module.Var` wrapping `stderrp` will have `lib_name`
populated with the library name where this import is expected.
---
src/Module.zig | 16 ++++++
src/Sema.zig | 134 ++++++++++++++++++++++++++++---------------------
2 files changed, 93 insertions(+), 57 deletions(-)
diff --git a/src/Module.zig b/src/Module.zig
index 4fd3ee5c01..76997fe95b 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -510,6 +510,7 @@ pub const Decl = struct {
gpa.destroy(func);
}
if (decl.getVariable()) |variable| {
+ variable.deinit(gpa);
gpa.destroy(variable);
}
if (decl.value_arena) |arena_state| {
@@ -694,6 +695,8 @@ pub const Decl = struct {
return func;
}
+ /// If the Decl has a value and it is an extern function, returns it,
+ /// otherwise null.
pub fn getExternFn(decl: *const Decl) ?*ExternFn {
if (!decl.owns_tv) return null;
const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data;
@@ -701,6 +704,8 @@ pub const Decl = struct {
return extern_fn;
}
+ /// If the Decl has a value and it is a variable, returns it,
+ /// otherwise null.
pub fn getVariable(decl: *Decl) ?*Var {
if (!decl.owns_tv) return null;
const variable = (decl.val.castTag(.variable) orelse return null).data;
@@ -1469,9 +1474,20 @@ pub const Var = struct {
init: Value,
owner_decl: *Decl,
+ /// Library name if specified.
+ /// For example `extern "c" var stderrp = ...` would have 'c' as library name.
+ /// Allocated with Module's allocator; outlives the ZIR code.
+ lib_name: ?[*:0]const u8,
+
is_extern: bool,
is_mutable: bool,
is_threadlocal: bool,
+
+ pub fn deinit(variable: *Var, gpa: Allocator) void {
+ if (variable.lib_name) |lib_name| {
+ gpa.free(mem.sliceTo(lib_name, 0));
+ }
+ }
};
/// The container that structs, enums, unions, and opaques have.
diff --git a/src/Sema.zig b/src/Sema.zig
index 48d7545a75..0e43a9f522 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5430,6 +5430,69 @@ fn zirFunc(
);
}
+/// Given a library name, examines if the library name should end up in
+/// `link.File.Options.system_libs` table (for example, libc is always
+/// specified via dedicated flag `link.File.Options.link_libc` instead),
+/// and puts it there if it doesn't exist.
+/// It also dupes the library name which can then be saved as part of the
+/// respective `Decl` (either `ExternFn` or `Var`).
+/// The liveness of the duped library name is tied to liveness of `Module`.
+/// To deallocate, call `deinit` on the respective `Decl` (`ExternFn` or `Var`).
+fn handleExternLibName(
+ sema: *Sema,
+ block: *Block,
+ src_loc: LazySrcLoc,
+ lib_name: []const u8,
+) CompileError![:0]u8 {
+ blk: {
+ const mod = sema.mod;
+ const target = mod.getTarget();
+ log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
+ if (target_util.is_libc_lib_name(target, lib_name)) {
+ if (!mod.comp.bin_file.options.link_libc) {
+ return sema.fail(
+ block,
+ src_loc,
+ "dependency on libc must be explicitly specified in the build command",
+ .{},
+ );
+ }
+ mod.comp.bin_file.options.link_libc = true;
+ break :blk;
+ }
+ if (target_util.is_libcpp_lib_name(target, lib_name)) {
+ if (!mod.comp.bin_file.options.link_libcpp) {
+ return sema.fail(
+ block,
+ src_loc,
+ "dependency on libc++ must be explicitly specified in the build command",
+ .{},
+ );
+ }
+ mod.comp.bin_file.options.link_libcpp = true;
+ break :blk;
+ }
+ if (mem.eql(u8, lib_name, "unwind")) {
+ mod.comp.bin_file.options.link_libunwind = true;
+ break :blk;
+ }
+ if (!target.isWasm() and !mod.comp.bin_file.options.pic) {
+ return sema.fail(
+ block,
+ src_loc,
+ "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.",
+ .{ lib_name, lib_name },
+ );
+ }
+ mod.comp.stage1AddLinkLib(lib_name) catch |err| {
+ return sema.fail(block, src_loc, "unable to add link lib '{s}': {s}", .{
+ lib_name, @errorName(err),
+ });
+ };
+ }
+ return sema.gpa.dupeZ(u8, lib_name);
+}
+
fn funcCommon(
sema: *Sema,
block: *Block,
@@ -5567,65 +5630,21 @@ fn funcCommon(
});
};
- if (opt_lib_name) |lib_name| blk: {
- const lib_name_src: LazySrcLoc = .{ .node_offset_lib_name = src_node_offset };
- log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
- if (target_util.is_libc_lib_name(target, lib_name)) {
- if (!mod.comp.bin_file.options.link_libc) {
- return sema.fail(
- block,
- lib_name_src,
- "dependency on libc must be explicitly specified in the build command",
- .{},
- );
- }
- mod.comp.bin_file.options.link_libc = true;
- break :blk;
- }
- if (target_util.is_libcpp_lib_name(target, lib_name)) {
- if (!mod.comp.bin_file.options.link_libcpp) {
- return sema.fail(
- block,
- lib_name_src,
- "dependency on libc++ must be explicitly specified in the build command",
- .{},
- );
- }
- mod.comp.bin_file.options.link_libcpp = true;
- break :blk;
- }
- if (mem.eql(u8, lib_name, "unwind")) {
- mod.comp.bin_file.options.link_libunwind = true;
- break :blk;
- }
- if (!target.isWasm() and !mod.comp.bin_file.options.pic) {
- return sema.fail(
- block,
- lib_name_src,
- "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.",
- .{ lib_name, lib_name },
- );
- }
- mod.comp.stage1AddLinkLib(lib_name) catch |err| {
- return sema.fail(block, lib_name_src, "unable to add link lib '{s}': {s}", .{
- lib_name, @errorName(err),
- });
- };
- }
-
if (is_extern) {
const new_extern_fn = try sema.gpa.create(Module.ExternFn);
errdefer sema.gpa.destroy(new_extern_fn);
- const lib_name: ?[*:0]const u8 = if (opt_lib_name) |lib_name| blk: {
- break :blk try sema.gpa.dupeZ(u8, lib_name);
- } else null;
-
new_extern_fn.* = Module.ExternFn{
.owner_decl = sema.owner_decl,
- .lib_name = lib_name,
+ .lib_name = null,
};
+ if (opt_lib_name) |lib_name| {
+ new_extern_fn.lib_name = try sema.handleExternLibName(block, .{
+ .node_offset_lib_name = src_node_offset,
+ }, lib_name);
+ }
+
const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn);
extern_fn_payload.* = .{
.base = .{ .tag = .extern_fn },
@@ -12456,13 +12475,8 @@ fn zirVarExtended(
try sema.validateVarType(block, mut_src, var_ty, small.is_extern);
- if (lib_name != null) {
- // Look at the sema code for functions which has this logic, it just needs to
- // be extracted and shared by both var and func
- return sema.fail(block, src, "TODO: handle var with lib_name in Sema", .{});
- }
-
const new_var = try sema.gpa.create(Module.Var);
+ errdefer sema.gpa.destroy(new_var);
log.debug("created variable {*} owner_decl: {*} ({s})", .{
new_var, sema.owner_decl, sema.owner_decl.name,
@@ -12474,7 +12488,13 @@ fn zirVarExtended(
.is_extern = small.is_extern,
.is_mutable = true, // TODO get rid of this unused field
.is_threadlocal = small.is_threadlocal,
+ .lib_name = null,
};
+
+ if (lib_name) |lname| {
+ new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname);
+ }
+
const result = try sema.addConstant(
var_ty,
try Value.Tag.variable.create(sema.arena, new_var),
From adc9a282d8b3cbe58e07c965fe40fb1dd8666bd7 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sun, 6 Feb 2022 18:18:53 +0100
Subject: [PATCH 0048/2031] stage2 ARM: fix load and store for abi_size < 4
Previously, in these cases, we would emit the ldr instruction even
though ldrb oder ldrh are the correct instructions.
---
src/arch/arm/CodeGen.zig | 157 ++++++++++++++++++++++++---------------
1 file changed, 98 insertions(+), 59 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index c87f750831..804aedb6cc 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -647,11 +647,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
}
}
-fn writeInt(self: *Self, comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
- const endian = self.target.cpu.arch.endian();
- std.mem.writeInt(T, buf, value, endian);
-}
-
/// Asserts there is already capacity to insert into top branch inst_table.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
const air_tags = self.air.instructions.items(.tag);
@@ -906,7 +901,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
break :result r;
},
else => {
- break :result try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not);
+ break :result try self.genBinOp(inst, ty_op.operand, .bool_true, .not);
},
}
};
@@ -934,7 +929,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .add);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -952,7 +947,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
fn airSub(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .sub);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -970,7 +965,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
fn airMul(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmMul(inst, bin_op.lhs, bin_op.rhs);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genMul(inst, bin_op.lhs, bin_op.rhs);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1026,25 +1021,25 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airBitOr(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airXor(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .xor);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airShl(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .shl);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .shl);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1056,7 +1051,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
fn airShr(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .shr);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .shr);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1296,7 +1291,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
else => {
const dst_mcv = try self.allocRegOrMem(inst, true);
- const offset_mcv = try self.genArmMulConstant(bin_op.rhs, @intCast(u32, elem_size));
+ const offset_mcv = try self.genMulConstant(bin_op.rhs, @intCast(u32, elem_size));
assert(offset_mcv == .register); // result of multiplication should always be register
self.register_manager.freezeRegs(&.{offset_mcv.register});
@@ -1304,7 +1299,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
self.register_manager.freezeRegs(&.{addr_reg});
defer self.register_manager.unfreezeRegs(&.{addr_reg});
- try self.genArmBinOpCode(addr_reg, base_mcv, offset_mcv, false, .add, .unsigned);
+ try self.genBinOpCode(addr_reg, base_mcv, offset_mcv, false, .add, .unsigned);
// At this point in time, neither the base register
// nor the offset register contains any valuable data
@@ -1415,6 +1410,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const elem_ty = ptr_ty.elemType();
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -1440,24 +1437,17 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.compare_flags_signed, .compare_flags_unsigned => unreachable,
.embedded_in_code => unreachable,
.register => |dst_reg| {
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = dst_reg,
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
+ try self.genLdrRegister(dst_reg, reg, elem_size);
},
.stack_offset => |off| {
- if (elem_ty.abiSize(self.target.*) <= 4) {
+ if (elem_size <= 4) {
const tmp_reg = try self.register_manager.allocReg(null);
self.register_manager.freezeRegs(&.{tmp_reg});
defer self.register_manager.unfreezeRegs(&.{tmp_reg});
try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
- } else if (elem_ty.abiSize(self.target.*) == 8) {
+ } else if (elem_size == 8) {
// TODO generalize this: maybe add a
// genArmMemcpy function which manually copies
// data if the size is below a certain
@@ -1500,7 +1490,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
const tmp_reg = regs[3];
// sub dst_reg, fp, #off
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
const adj_off = off + elem_size;
const offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_off)) |x| x else {
return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
@@ -1528,7 +1517,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
});
// memcpy(src, dst, len)
- try self.genArmInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
else => return self.fail("TODO load from register into {}", .{dst_mcv}),
@@ -1600,14 +1589,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
switch (value) {
.register => |value_reg| {
- _ = try self.addInst(.{
- .tag = .str,
- .data = .{ .rr_offset = .{
- .rt = value_reg,
- .rn = addr_reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
+ try self.genStrRegister(value_reg, addr_reg, @intCast(u32, value_ty.abiSize(self.target.*)));
},
else => {
if (value_ty.abiSize(self.target.*) <= 4) {
@@ -1723,7 +1705,7 @@ fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool {
};
}
-fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue {
+fn genBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue {
// In the case of bitshifts, the type of rhs is different
// from the resulting type
const ty = self.air.typeOf(op_lhs);
@@ -1732,17 +1714,17 @@ fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Bool => {
- return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, 1, .unsigned);
+ return self.genBinIntOp(inst, op_lhs, op_rhs, op, 1, .unsigned);
},
.Int => {
const int_info = ty.intInfo(self.target.*);
- return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, int_info.bits, int_info.signedness);
+ return self.genBinIntOp(inst, op_lhs, op_rhs, op, int_info.bits, int_info.signedness);
},
else => unreachable,
}
}
-fn genArmBinIntOp(
+fn genBinIntOp(
self: *Self,
inst: Air.Inst.Index,
op_lhs: Air.Inst.Ref,
@@ -1852,7 +1834,7 @@ fn genArmBinIntOp(
try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
}
- try self.genArmBinOpCode(
+ try self.genBinOpCode(
dst_mcv.register,
lhs_mcv,
rhs_mcv,
@@ -1863,7 +1845,7 @@ fn genArmBinIntOp(
return dst_mcv;
}
-fn genArmBinOpCode(
+fn genBinOpCode(
self: *Self,
dst_reg: Register,
lhs_mcv: MCValue,
@@ -1971,7 +1953,7 @@ fn genArmBinOpCode(
}
}
-fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue {
+fn genMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue {
const lhs = try self.resolveInst(op_lhs);
const rhs = try self.resolveInst(op_rhs);
@@ -2050,7 +2032,7 @@ fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Ai
return dst_mcv;
}
-fn genArmMulConstant(self: *Self, op: Air.Inst.Ref, imm: u32) !MCValue {
+fn genMulConstant(self: *Self, op: Air.Inst.Ref, imm: u32) !MCValue {
const lhs = try self.resolveInst(op);
const rhs = MCValue{ .immediate = imm };
@@ -2097,7 +2079,71 @@ fn genArmMulConstant(self: *Self, op: Air.Inst.Ref, imm: u32) !MCValue {
return dst_mcv;
}
-fn genArmInlineMemcpy(
+fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, abi_size: u32) !void {
+ switch (abi_size) {
+ 1, 3, 4 => {
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .ldrb,
+ 3, 4 => .ldr,
+ else => unreachable,
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .rr_offset = .{
+ .rt = dest_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.Offset.none },
+ } },
+ });
+ },
+ 2 => {
+ _ = try self.addInst(.{
+ .tag = .ldrh,
+ .data = .{ .rr_extra_offset = .{
+ .rt = dest_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
+ } },
+ });
+ },
+ else => unreachable, // invalid abi_size for a register
+ }
+}
+
+fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, abi_size: u32) !void {
+ switch (abi_size) {
+ 1, 3, 4 => {
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .strb,
+ 3, 4 => .str,
+ else => unreachable,
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .rr_offset = .{
+ .rt = source_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.Offset.none },
+ } },
+ });
+ },
+ 2 => {
+ _ = try self.addInst(.{
+ .tag = .strh,
+ .data = .{ .rr_extra_offset = .{
+ .rt = source_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
+ } },
+ });
+ },
+ else => unreachable, // invalid abi_size for a register
+ }
+}
+
+fn genInlineMemcpy(
self: *Self,
src: Register,
dst: Register,
@@ -2469,7 +2515,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
// The destination register is not present in the cmp instruction
// The signedness of the integer does not matter for the cmp instruction
- try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq, undefined);
+ try self.genBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq, undefined);
break :result switch (signedness) {
.signed => MCValue{ .compare_flags_signed = op },
@@ -2701,7 +2747,7 @@ fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
else => .{ .register = try self.copyToTmpRegister(ty, operand) },
};
- try self.genArmBinOpCode(undefined, reg_mcv, .{ .immediate = 0 }, false, .cmp_eq, undefined);
+ try self.genBinOpCode(undefined, reg_mcv, .{ .immediate = 0 }, false, .cmp_eq, undefined);
return MCValue{ .compare_flags_unsigned = .eq };
} else {
@@ -2731,7 +2777,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
};
- try self.genArmBinOpCode(undefined, reg_mcv, .{ .immediate = 0 }, false, .cmp_eq, undefined);
+ try self.genBinOpCode(undefined, reg_mcv, .{ .immediate = 0 }, false, .cmp_eq, undefined);
return MCValue{ .compare_flags_unsigned = .gt };
} else {
@@ -2946,8 +2992,8 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const air_tags = self.air.instructions.items(.tag);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (air_tags[inst]) {
- .bool_and => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and),
- .bool_or => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or),
+ .bool_and => try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and),
+ .bool_or => try self.genBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or),
else => unreachable, // Not a boolean operation
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -3242,7 +3288,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
});
// memcpy(src, dst, len)
- try self.genArmInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
}
@@ -3439,14 +3485,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) });
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = reg,
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
+ try self.genLdrRegister(reg, reg, @intCast(u32, ty.abiSize(self.target.*)));
},
.stack_offset => |unadjusted_off| {
// TODO: maybe addressing from sp instead of fp
From 53d8a25dab5ddcea16ac70cdcdf28cb3e4944cbb Mon Sep 17 00:00:00 2001
From: Jonathan Marler
Date: Sat, 5 Feb 2022 18:59:09 -0700
Subject: [PATCH 0049/2031] child_process: collectOutputWindows handle
broken_pipe from ReadFile
This was found on a user's machine when calling "git" as a child process from msys. Instead of getting BROKEN_PIPE on GetOverlappedResult, it would occur on ReadFile, which would then cause the function to hang because the async operation was never started.
---
lib/std/child_process.zig | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 7808dcd1e5..3e12312d5d 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -277,10 +277,19 @@ pub const ChildProcess = struct {
const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
try outs[i].ensureTotalCapacity(new_capacity);
const buf = outs[i].unusedCapacitySlice();
- _ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
- wait_objects[wait_object_count] = handles[i];
- wait_object_count += 1;
+ const read_result = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
+ std.debug.assert(read_result == 0);
+ switch (windows.kernel32.GetLastError()) {
+ .IO_PENDING => {
+ wait_objects[wait_object_count] = handles[i];
+ wait_object_count += 1;
+ },
+ .BROKEN_PIPE => {}, // don't add to the wait_objects list
+ else => |err| return windows.unexpectedError(err),
+ }
}
+ if (wait_object_count == 0)
+ return;
while (true) {
const status = windows.kernel32.WaitForMultipleObjects(wait_object_count, &wait_objects, 0, windows.INFINITE);
@@ -320,9 +329,16 @@ pub const ChildProcess = struct {
try outs[i].ensureTotalCapacity(new_capacity);
const buf = outs[i].unusedCapacitySlice();
if (buf.len == 0) return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong;
- _ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
- wait_objects[wait_object_count] = handles[i];
- wait_object_count += 1;
+ const read_result = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
+ std.debug.assert(read_result == 0);
+ switch (windows.kernel32.GetLastError()) {
+ .IO_PENDING => {
+ wait_objects[wait_object_count] = handles[i];
+ wait_object_count += 1;
+ },
+ .BROKEN_PIPE => {}, // don't add to the wait_objects list
+ else => |err| return windows.unexpectedError(err),
+ }
}
}
From 8f830207c47987de9767130d54abff79c8ec257d Mon Sep 17 00:00:00 2001
From: Jonathan Marler
Date: Sun, 6 Feb 2022 06:05:40 -0700
Subject: [PATCH 0050/2031] fix bug I think I found while manually reviewing
---
lib/std/child_process.zig | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 3e12312d5d..4ccfcb4029 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -288,10 +288,8 @@ pub const ChildProcess = struct {
else => |err| return windows.unexpectedError(err),
}
}
- if (wait_object_count == 0)
- return;
- while (true) {
+ while (wait_object_count > 0) {
const status = windows.kernel32.WaitForMultipleObjects(wait_object_count, &wait_objects, 0, windows.INFINITE);
if (status == windows.WAIT_FAILED) {
switch (windows.kernel32.GetLastError()) {
@@ -315,11 +313,7 @@ pub const ChildProcess = struct {
var read_bytes: u32 = undefined;
if (windows.kernel32.GetOverlappedResult(handles[i], &overlapped[i], &read_bytes, 0) == 0) {
switch (windows.kernel32.GetLastError()) {
- .BROKEN_PIPE => {
- if (wait_object_count == 0)
- break;
- continue;
- },
+ .BROKEN_PIPE => continue,
else => |err| return windows.unexpectedError(err),
}
}
From 4fddb591e2377186ef942084cae2e93d5de448b3 Mon Sep 17 00:00:00 2001
From: Jonathan Marler
Date: Sun, 6 Feb 2022 15:20:15 -0700
Subject: [PATCH 0051/2031] rework to allow ReadFile to complete synchronously
---
lib/std/child_process.zig | 55 +++++++++++++++++++++++++--------------
1 file changed, 36 insertions(+), 19 deletions(-)
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 4ccfcb4029..10aeacf755 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -252,6 +252,33 @@ pub const ChildProcess = struct {
}
}
+ const WindowsAsyncReadResult = enum {
+ pending,
+ closed,
+ full,
+ };
+
+ fn windowsAsyncRead(
+ handle: windows.HANDLE,
+ overlapped: *windows.OVERLAPPED,
+ buf: *std.ArrayList(u8),
+ bump_amt: usize,
+ max_output_bytes: usize,
+ ) !WindowsAsyncReadResult {
+ while (true) {
+ const new_capacity = std.math.min(buf.items.len + bump_amt, max_output_bytes);
+ try buf.ensureTotalCapacity(new_capacity);
+ const next_buf = buf.unusedCapacitySlice();
+ if (next_buf.len == 0) return .full;
+ const read_result = windows.kernel32.ReadFile(handle, next_buf.ptr, math.cast(u32, next_buf.len) catch maxInt(u32), null, overlapped);
+ if (read_result == 0) return switch (windows.kernel32.GetLastError()) {
+ .IO_PENDING => .pending,
+ .BROKEN_PIPE => .closed,
+ else => |err| windows.unexpectedError(err),
+ };
+ }
+ }
+
fn collectOutputWindows(child: *const ChildProcess, outs: [2]*std.ArrayList(u8), max_output_bytes: usize) !void {
const bump_amt = 512;
const handles = [_]windows.HANDLE{
@@ -274,18 +301,13 @@ pub const ChildProcess = struct {
// Windows Async IO requires an initial call to ReadFile before waiting on the handle
for ([_]u1{ 0, 1 }) |i| {
- const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
- try outs[i].ensureTotalCapacity(new_capacity);
- const buf = outs[i].unusedCapacitySlice();
- const read_result = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
- std.debug.assert(read_result == 0);
- switch (windows.kernel32.GetLastError()) {
- .IO_PENDING => {
+ switch (try windowsAsyncRead(handles[i], &overlapped[i], outs[i], bump_amt, max_output_bytes)) {
+ .pending => {
wait_objects[wait_object_count] = handles[i];
wait_object_count += 1;
},
- .BROKEN_PIPE => {}, // don't add to the wait_objects list
- else => |err| return windows.unexpectedError(err),
+ .closed => {}, // don't add to the wait_objects list
+ .full => return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong,
}
}
@@ -319,19 +341,14 @@ pub const ChildProcess = struct {
}
outs[i].items.len += read_bytes;
- const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
- try outs[i].ensureTotalCapacity(new_capacity);
- const buf = outs[i].unusedCapacitySlice();
- if (buf.len == 0) return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong;
- const read_result = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
- std.debug.assert(read_result == 0);
- switch (windows.kernel32.GetLastError()) {
- .IO_PENDING => {
+
+ switch (try windowsAsyncRead(handles[i], &overlapped[i], outs[i], bump_amt, max_output_bytes)) {
+ .pending => {
wait_objects[wait_object_count] = handles[i];
wait_object_count += 1;
},
- .BROKEN_PIPE => {}, // don't add to the wait_objects list
- else => |err| return windows.unexpectedError(err),
+ .closed => {}, // don't add to the wait_objects list
+ .full => return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong,
}
}
}
From fd1284ebd07ded1c67bbaff4c14f093051e56f59 Mon Sep 17 00:00:00 2001
From: John Schmidt
Date: Sun, 6 Feb 2022 22:11:41 +0100
Subject: [PATCH 0052/2031] stage2: apply type coercion in if expressions
When setting the break value in an if expression we must explicitly
check whether a result location type coercion needs to happen. This was
already done for switch expressions, so let's imitate that check and
apply the fix to if expressions. To make this possible, we now also
propagate `rl_ty_inst` to sub scopes.
---
src/AstGen.zig | 42 +++++++++++++++++++++++++++++++++--------
test/behavior/basic.zig | 6 ++++++
2 files changed, 40 insertions(+), 8 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 59878f940c..228937fffa 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -5118,8 +5118,10 @@ fn setCondBrPayloadElideBlockStorePtr(
const astgen = then_scope.astgen;
const then_body = then_scope.instructionsSliceUpto(else_scope);
const else_body = else_scope.instructionsSlice();
- const then_body_len = @intCast(u32, then_body.len + @boolToInt(then_break != 0));
- const else_body_len = @intCast(u32, else_body.len + @boolToInt(else_break != 0));
+ const has_then_break = then_break != 0;
+ const has_else_break = else_break != 0;
+ const then_body_len = @intCast(u32, then_body.len + @boolToInt(has_then_break));
+ const else_body_len = @intCast(u32, else_body.len + @boolToInt(has_else_break));
try astgen.extra.ensureUnusedCapacity(astgen.gpa, @typeInfo(Zir.Inst.CondBr).Struct.fields.len +
then_body_len + else_body_len);
@@ -5135,26 +5137,49 @@ fn setCondBrPayloadElideBlockStorePtr(
const then_body_len_index = condbr_pl + 1;
const else_body_len_index = condbr_pl + 2;
+ // The break instructions need to have their operands coerced if the
+ // switch's result location is a `ty`. In this case we overwrite the
+ // `store_to_block_ptr` instruction with an `as` instruction and repurpose
+ // it as the break operand.
for (then_body) |src_inst| {
- if (zir_tags[src_inst] == .store_to_block_ptr) {
- if (zir_datas[src_inst].bin.lhs == block_ptr) {
+ if (zir_tags[src_inst] == .store_to_block_ptr and
+ zir_datas[src_inst].bin.lhs == block_ptr)
+ {
+ if (then_scope.rl_ty_inst != .none and has_then_break) {
+ zir_tags[src_inst] = .as;
+ zir_datas[src_inst].bin = .{
+ .lhs = then_scope.rl_ty_inst,
+ .rhs = zir_datas[then_break].@"break".operand,
+ };
+ zir_datas[then_break].@"break".operand = indexToRef(src_inst);
+ } else {
astgen.extra.items[then_body_len_index] -= 1;
continue;
}
}
astgen.extra.appendAssumeCapacity(src_inst);
}
- if (then_break != 0) astgen.extra.appendAssumeCapacity(then_break);
+ if (has_then_break) astgen.extra.appendAssumeCapacity(then_break);
+
for (else_body) |src_inst| {
- if (zir_tags[src_inst] == .store_to_block_ptr) {
- if (zir_datas[src_inst].bin.lhs == block_ptr) {
+ if (zir_tags[src_inst] == .store_to_block_ptr and
+ zir_datas[src_inst].bin.lhs == block_ptr)
+ {
+ if (else_scope.rl_ty_inst != .none and has_else_break) {
+ zir_tags[src_inst] = .as;
+ zir_datas[src_inst].bin = .{
+ .lhs = else_scope.rl_ty_inst,
+ .rhs = zir_datas[else_break].@"break".operand,
+ };
+ zir_datas[else_break].@"break".operand = indexToRef(src_inst);
+ } else {
astgen.extra.items[else_body_len_index] -= 1;
continue;
}
}
astgen.extra.appendAssumeCapacity(src_inst);
}
- if (else_break != 0) astgen.extra.appendAssumeCapacity(else_break);
+ if (has_else_break) astgen.extra.appendAssumeCapacity(else_break);
}
fn whileExpr(
@@ -9460,6 +9485,7 @@ const GenZir = struct {
.decl_node_index = gz.decl_node_index,
.decl_line = gz.decl_line,
.parent = scope,
+ .rl_ty_inst = gz.rl_ty_inst,
.astgen = gz.astgen,
.suspend_node = gz.suspend_node,
.nosuspend_node = gz.nosuspend_node,
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 186418b69c..0c2cfbc3d5 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -693,3 +693,9 @@ test "variable name containing underscores does not shadow int primitive" {
_ = u6__4;
_ = i2_04_8;
}
+
+test "if expression type coercion" {
+ var cond: bool = true;
+ const x: u16 = if (cond) 1 else 0;
+ try expect(@as(u16, x) == 1);
+}
From 287ff4ab58f8af70383b6e334c7c862c8b8fbeec Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 30 Jan 2022 17:23:14 -0700
Subject: [PATCH 0053/2031] stage2: add more float arithmetic and f80 support
AstGen: Fixed bug where f80 types in source were triggering illegal
behavior.
Value: handle f80 in floating point arithmetic functions.
Value: implement floatRem and floatMod
This commit introduces dependencies on compiler-rt that are not
implemented. Those are a prerequisite to merging this branch.
---
src/AstGen.zig | 6 +-
src/Sema.zig | 24 ++---
src/value.zig | 215 +++++++++++++++++++++++++++++++----------
test/behavior/math.zig | 2 -
4 files changed, 178 insertions(+), 69 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 228937fffa..4133d3d364 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -8409,10 +8409,11 @@ fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.In
.c_ushort_type,
.comptime_float_type,
.comptime_int_type,
- .f128_type,
.f16_type,
.f32_type,
.f64_type,
+ .f80_type,
+ .f128_type,
.i16_type,
.i32_type,
.i64_type,
@@ -8648,10 +8649,11 @@ fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool {
.c_ulong_type,
.c_ulonglong_type,
.c_ushort_type,
- .f128_type,
.f16_type,
.f32_type,
.f64_type,
+ .f80_type,
+ .f128_type,
.i16_type,
.i32_type,
.i64_type,
diff --git a/src/Sema.zig b/src/Sema.zig
index 934fa4064b..75492e2ae7 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -8187,7 +8187,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
scalar_type,
- try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena),
+ try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .add };
@@ -8280,7 +8280,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
scalar_type,
- try lhs_val.floatSub(rhs_val, scalar_type, sema.arena),
+ try lhs_val.floatSub(rhs_val, scalar_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .sub };
@@ -8396,7 +8396,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
scalar_type,
- try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena),
+ try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena, target),
);
}
} else {
@@ -8471,7 +8471,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
scalar_type,
- try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena),
+ try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .div_trunc };
@@ -8534,7 +8534,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
scalar_type,
- try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena),
+ try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .div_floor };
@@ -8586,7 +8586,7 @@ fn analyzeArithmetic(
// TODO: emit compile error if there is a remainder
return sema.addConstant(
scalar_type,
- try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena),
+ try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .div_exact };
@@ -8641,7 +8641,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
scalar_type,
- try lhs_val.floatMul(rhs_val, scalar_type, sema.arena),
+ try lhs_val.floatMul(rhs_val, scalar_type, sema.arena, target),
);
}
} else break :rs .{ .src = lhs_src, .air_tag = .mul };
@@ -8797,7 +8797,7 @@ fn analyzeArithmetic(
}
return sema.addConstant(
scalar_type,
- try lhs_val.floatRem(rhs_val, sema.arena),
+ try lhs_val.floatRem(rhs_val, scalar_type, sema.arena, target),
);
} else {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
@@ -8858,7 +8858,7 @@ fn analyzeArithmetic(
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
scalar_type,
- try lhs_val.floatRem(rhs_val, sema.arena),
+ try lhs_val.floatRem(rhs_val, scalar_type, sema.arena, target),
);
} else break :rs .{ .src = rhs_src, .air_tag = .rem };
} else break :rs .{ .src = lhs_src, .air_tag = .rem };
@@ -8915,7 +8915,7 @@ fn analyzeArithmetic(
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
scalar_type,
- try lhs_val.floatMod(rhs_val, sema.arena),
+ try lhs_val.floatMod(rhs_val, scalar_type, sema.arena, target),
);
} else break :rs .{ .src = rhs_src, .air_tag = .mod };
} else break :rs .{ .src = lhs_src, .air_tag = .mod };
@@ -14195,12 +14195,12 @@ fn coerce(
.Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) {
.ComptimeFloat => {
const val = try sema.resolveConstValue(block, inst_src, inst);
- const result_val = try val.floatCast(sema.arena, dest_ty);
+ const result_val = try val.floatCast(sema.arena, dest_ty, target);
return try sema.addConstant(dest_ty, result_val);
},
.Float => {
if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
- const result_val = try val.floatCast(sema.arena, dest_ty);
+ const result_val = try val.floatCast(sema.arena, dest_ty, target);
if (!val.eql(result_val, dest_ty)) {
return sema.fail(
block,
diff --git a/src/value.zig b/src/value.zig
index e444e2daf1..18c8357b6b 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -138,6 +138,7 @@ pub const Value = extern union {
float_16,
float_32,
float_64,
+ float_80,
float_128,
enum_literal,
/// A specific enum tag, indicated by the field index (declaration order).
@@ -295,6 +296,7 @@ pub const Value = extern union {
.float_16 => Payload.Float_16,
.float_32 => Payload.Float_32,
.float_64 => Payload.Float_64,
+ .float_80 => Payload.Float_80,
.float_128 => Payload.Float_128,
.@"error" => Payload.Error,
.inferred_alloc => Payload.InferredAlloc,
@@ -546,6 +548,7 @@ pub const Value = extern union {
.float_16 => return self.copyPayloadShallow(arena, Payload.Float_16),
.float_32 => return self.copyPayloadShallow(arena, Payload.Float_32),
.float_64 => return self.copyPayloadShallow(arena, Payload.Float_64),
+ .float_80 => return self.copyPayloadShallow(arena, Payload.Float_80),
.float_128 => return self.copyPayloadShallow(arena, Payload.Float_128),
.enum_literal => {
const payload = self.castTag(.enum_literal).?;
@@ -733,6 +736,7 @@ pub const Value = extern union {
.float_16 => return out_stream.print("{}", .{val.castTag(.float_16).?.data}),
.float_32 => return out_stream.print("{}", .{val.castTag(.float_32).?.data}),
.float_64 => return out_stream.print("{}", .{val.castTag(.float_64).?.data}),
+ .float_80 => return out_stream.print("{}", .{val.castTag(.float_80).?.data}),
.float_128 => return out_stream.print("{}", .{val.castTag(.float_128).?.data}),
.@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}),
// TODO to print this it should be error{ Set, Items }!T(val), but we need the type for that
@@ -1083,6 +1087,7 @@ pub const Value = extern union {
16 => return Value.Tag.float_16.create(arena, floatReadFromMemory(f16, target, buffer)),
32 => return Value.Tag.float_32.create(arena, floatReadFromMemory(f32, target, buffer)),
64 => return Value.Tag.float_64.create(arena, floatReadFromMemory(f64, target, buffer)),
+ 80 => return Value.Tag.float_80.create(arena, floatReadFromMemory(f80, target, buffer)),
128 => return Value.Tag.float_128.create(arena, floatReadFromMemory(f128, target, buffer)),
else => unreachable,
},
@@ -1100,6 +1105,12 @@ pub const Value = extern union {
}
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
+ if (F == f80) {
+ // TODO: use std.math.F80Repr
+ const big_int = std.mem.readInt(u128, buffer[0..16], target.cpu.arch.endian());
+ const int = @truncate(u80, big_int);
+ return @bitCast(F, int);
+ }
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
@@ -1114,6 +1125,7 @@ pub const Value = extern union {
.float_16 => @floatCast(T, val.castTag(.float_16).?.data),
.float_32 => @floatCast(T, val.castTag(.float_32).?.data),
.float_64 => @floatCast(T, val.castTag(.float_64).?.data),
+ .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
.float_128 => @floatCast(T, val.castTag(.float_128).?.data),
.zero => 0,
@@ -1367,14 +1379,13 @@ pub const Value = extern union {
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
- pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type) !Value {
- switch (dest_ty.tag()) {
- .f16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
- .f32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
- .f64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
- .f128, .comptime_float, .c_longdouble => {
- return Value.Tag.float_128.create(arena, self.toFloat(f128));
- },
+ pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ switch (dest_ty.floatBits(target)) {
+ 16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
+ 32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
+ 64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
+ 80 => return Value.Tag.float_80.create(arena, self.toFloat(f80)),
+ 128 => return Value.Tag.float_128.create(arena, self.toFloat(f128)),
else => unreachable,
}
}
@@ -1389,8 +1400,8 @@ pub const Value = extern union {
.float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0,
.float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0,
.float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
- // .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
- .float_128 => @panic("TODO lld: error: undefined symbol: fmodl"),
+ .float_80 => @rem(self.castTag(.float_80).?.data, 1) != 0,
+ .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
else => unreachable,
};
@@ -1408,6 +1419,7 @@ pub const Value = extern union {
.float_16 => self.castTag(.float_16).?.data == 0,
.float_32 => self.castTag(.float_32).?.data == 0,
.float_64 => self.castTag(.float_64).?.data == 0,
+ .float_80 => self.castTag(.float_80).?.data == 0,
.float_128 => self.castTag(.float_128).?.data == 0,
.int_big_positive => self.castTag(.int_big_positive).?.asBigInt().eqZero(),
@@ -1440,6 +1452,7 @@ pub const Value = extern union {
.float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0),
.float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0),
.float_64 => std.math.order(lhs.castTag(.float_64).?.data, 0),
+ .float_80 => std.math.order(lhs.castTag(.float_80).?.data, 0),
.float_128 => std.math.order(lhs.castTag(.float_128).?.data, 0),
else => unreachable,
@@ -1471,6 +1484,7 @@ pub const Value = extern union {
.float_16 => return std.math.order(lhs.castTag(.float_16).?.data, rhs.castTag(.float_16).?.data),
.float_32 => return std.math.order(lhs.castTag(.float_32).?.data, rhs.castTag(.float_32).?.data),
.float_64 => return std.math.order(lhs.castTag(.float_64).?.data, rhs.castTag(.float_64).?.data),
+ .float_80 => return std.math.order(lhs.castTag(.float_80).?.data, rhs.castTag(.float_80).?.data),
.float_128 => return std.math.order(lhs.castTag(.float_128).?.data, rhs.castTag(.float_128).?.data),
else => unreachable,
};
@@ -2139,6 +2153,7 @@ pub const Value = extern union {
.float_16,
.float_32,
.float_64,
+ .float_80,
.float_128,
=> true,
else => false,
@@ -2174,6 +2189,7 @@ pub const Value = extern union {
16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)),
+ 80 => return Value.Tag.float_80.create(arena, @intToFloat(f80, x)),
128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)),
else => unreachable,
}
@@ -2184,6 +2200,7 @@ pub const Value = extern union {
16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
64 => return Value.Tag.float_64.create(arena, @floatCast(f64, float)),
+ 80 => return Value.Tag.float_80.create(arena, @floatCast(f80, float)),
128 => return Value.Tag.float_128.create(arena, float),
else => unreachable,
}
@@ -2281,7 +2298,7 @@ pub const Value = extern union {
}
if (ty.isAnyFloat()) {
- return floatAdd(lhs, rhs, ty, arena);
+ return floatAdd(lhs, rhs, ty, arena, target);
}
const overflow_result = try intAddWithOverflow(lhs, rhs, ty, arena, target);
@@ -2371,7 +2388,7 @@ pub const Value = extern union {
}
if (ty.isAnyFloat()) {
- return floatSub(lhs, rhs, ty, arena);
+ return floatSub(lhs, rhs, ty, arena, target);
}
const overflow_result = try intSubWithOverflow(lhs, rhs, ty, arena, target);
@@ -2454,7 +2471,7 @@ pub const Value = extern union {
}
if (ty.isAnyFloat()) {
- return floatMul(lhs, rhs, ty, arena);
+ return floatMul(lhs, rhs, ty, arena, target);
}
const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, target);
@@ -2753,23 +2770,72 @@ pub const Value = extern union {
.float_16 => std.math.isNan(val.castTag(.float_16).?.data),
.float_32 => std.math.isNan(val.castTag(.float_32).?.data),
.float_64 => std.math.isNan(val.castTag(.float_64).?.data),
+ .float_80 => std.math.isNan(val.castTag(.float_80).?.data),
.float_128 => std.math.isNan(val.castTag(.float_128).?.data),
else => false,
};
}
- pub fn floatRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
- _ = lhs;
- _ = rhs;
- _ = allocator;
- @panic("TODO implement Value.floatRem");
+ pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 128 => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
}
- pub fn floatMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
- _ = lhs;
- _ = rhs;
- _ = allocator;
- @panic("TODO implement Value.floatMod");
+ pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 128 => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
}
pub fn intMul(lhs: Value, rhs: Value, allocator: Allocator) !Value {
@@ -2929,24 +2995,30 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val + rhs_val);
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val + rhs_val);
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val + rhs_val);
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, lhs_val + rhs_val);
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val + rhs_val);
@@ -2960,24 +3032,30 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val - rhs_val);
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val - rhs_val);
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val - rhs_val);
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, lhs_val - rhs_val);
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val - rhs_val);
@@ -2991,24 +3069,30 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val / rhs_val);
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val / rhs_val);
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val / rhs_val);
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, lhs_val / rhs_val);
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val / rhs_val);
@@ -3022,24 +3106,30 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val));
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val));
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val));
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val));
@@ -3053,24 +3143,30 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val));
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val));
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val));
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val));
@@ -3084,24 +3180,30 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val * rhs_val);
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val * rhs_val);
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val * rhs_val);
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, lhs_val * rhs_val);
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val * rhs_val);
@@ -3250,6 +3352,13 @@ pub const Value = extern union {
data: f64,
};
+ pub const Float_80 = struct {
+ pub const base_tag = Tag.float_80;
+
+ base: Payload = .{ .tag = base_tag },
+ data: f80,
+ };
+
pub const Float_128 = struct {
pub const base_tag = Tag.float_128;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 8f947e2829..a1243eb7c1 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -768,8 +768,6 @@ test "shift left/right on u0 operand" {
}
test "comptime float rem int" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
comptime {
var x = @as(f32, 1) % 2;
try expect(x == 1.0);
From 545aa790a430bd2c8390435ee52f5fbe147f6c54 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Sun, 6 Feb 2022 12:30:30 +0100
Subject: [PATCH 0054/2031] Sema: Fix memory leak
---
src/Sema.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/Sema.zig b/src/Sema.zig
index 934fa4064b..07e71c4bff 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -7058,6 +7058,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
sema.air_extra.appendSliceAssumeCapacity(cond_body);
}
+ gpa.free(prev_then_body);
prev_then_body = case_block.instructions.toOwnedSlice(gpa);
prev_cond_br = new_cond_br;
}
From 53e6c719efe5073307ee58436b17ee80f574b984 Mon Sep 17 00:00:00 2001
From: Marc Tiehuis
Date: Sat, 5 Feb 2022 20:36:48 +1300
Subject: [PATCH 0055/2031] std/math: optimize division with divisors less than
a half-limb
This adds a new path which avoids using compiler_rt-generated udivmod
instructions in the case that a divisor fits in half a limb (i.e. is
less than the half-limb maximum). Two half-limb divisions are performed
instead, which ensures that non-emulated division instructions are actually used.
does not improve the udivmod code which should still be reviewed
independently of this issue.
Notably this improves the performance of the toString implementation of
non-power-of-two bases considerably.
Division performance is improved ~1000% based on some coarse testing.
The following test code is used to provide a rough comparison between
the old and new methods.
```
const std = @import("std");
const Managed = std.math.big.int.Managed;
const allocator = std.heap.c_allocator;
fn fib(a: *Managed, n: usize) !void {
var b = try Managed.initSet(allocator, 1);
defer b.deinit();
var c = try Managed.init(allocator);
defer c.deinit();
var i: usize = 0;
while (i < n) : (i += 1) {
try c.add(a.toConst(), b.toConst());
a.swap(&b);
b.swap(&c);
}
}
pub fn main() !void {
var a = try Managed.initSet(allocator, 0);
defer a.deinit();
try fib(&a, 1_000_000);
// Note: Next two lines (and printed digit count) omitted on no-print version.
const as = try a.toString(allocator, 10, .lower);
defer allocator.free(as);
std.debug.print("fib: digit count: {}, limb count: {}\n", .{ as.len, a.limbs.len });
}
```
```
==> time.no-print <==
limb count: 10849
________________________________________________________
Executed in 10.60 secs fish external
usr time 10.44 secs 0.00 millis 10.44 secs
sys time 0.02 secs 1.12 millis 0.02 secs
==> time.old <==
fib: digit count: 208988, limb count: 10849
________________________________________________________
Executed in 22.78 secs fish external
usr time 22.43 secs 1.01 millis 22.43 secs
sys time 0.03 secs 0.13 millis 0.03 secs
==> time.optimized <==
fib: digit count: 208988, limb count: 10849
________________________________________________________
Executed in 11.59 secs fish external
usr time 11.56 secs 1.03 millis 11.56 secs
sys time 0.03 secs 0.12 millis 0.03 secs
```
Perf data for non-optimized and optimized, verifying no udivmod is
generated by the new code.
```
$ perf report -i perf.data.old --stdio
- Total Lost Samples: 0
-
- Samples: 90K of event 'cycles:u'
- Event count (approx.): 71603695208
-
- Overhead Command Shared Object Symbol
- ........ ....... ................ ...........................................
-
52.97% t t [.] compiler_rt.udivmod.udivmod
45.97% t t [.] std.math.big.int.Mutable.addCarry
0.83% t t [.] main
0.08% t libc-2.33.so [.] __memmove_avx_unaligned_erms
0.08% t t [.] __udivti3
0.03% t [unknown] [k] 0xffffffff9a0010a7
0.02% t t [.] std.math.big.int.Managed.ensureCapacity
0.01% t libc-2.33.so [.] _int_malloc
0.00% t libc-2.33.so [.] __malloc_usable_size
0.00% t libc-2.33.so [.] _int_free
0.00% t t [.] 0x0000000000004a80
0.00% t t [.] std.heap.CAllocator.resize
0.00% t libc-2.33.so [.] _mid_memalign
0.00% t libc-2.33.so [.] sysmalloc
0.00% t libc-2.33.so [.] __posix_memalign
0.00% t t [.] std.heap.CAllocator.alloc
0.00% t ld-2.33.so [.] do_lookup_x
$ perf report -i perf.data.optimized --stdio
- Total Lost Samples: 0
-
- Samples: 46K of event 'cycles:u'
- Event count (approx.): 36790112336
-
- Overhead Command Shared Object Symbol
- ........ ....... ................ ...........................................
-
79.98% t t [.] std.math.big.int.Mutable.addCarry
15.14% t t [.] main
4.58% t t [.] std.math.big.int.Managed.ensureCapacity
0.21% t libc-2.33.so [.] __memmove_avx_unaligned_erms
0.05% t [unknown] [k] 0xffffffff9a0010a7
0.02% t libc-2.33.so [.] _int_malloc
0.01% t t [.] std.heap.CAllocator.alloc
0.01% t libc-2.33.so [.] __malloc_usable_size
0.00% t libc-2.33.so [.] systrim.constprop.0
0.00% t libc-2.33.so [.] _mid_memalign
0.00% t t [.] 0x0000000000000c7d
0.00% t libc-2.33.so [.] malloc
0.00% t ld-2.33.so [.] check_match
```
Closes #10630.
---
lib/std/math/big.zig | 1 +
lib/std/math/big/int.zig | 40 +++++++++++++++++++++++++++++++++--
lib/std/math/big/int_test.zig | 37 ++++++++++++++++++++++++++++++--
3 files changed, 74 insertions(+), 4 deletions(-)
diff --git a/lib/std/math/big.zig b/lib/std/math/big.zig
index e7f8a7fb34..c7fc0b17f5 100644
--- a/lib/std/math/big.zig
+++ b/lib/std/math/big.zig
@@ -7,6 +7,7 @@ pub const Limb = usize;
const limb_info = @typeInfo(Limb).Int;
pub const SignedLimb = std.meta.Int(.signed, limb_info.bits);
pub const DoubleLimb = std.meta.Int(.unsigned, 2 * limb_info.bits);
+pub const HalfLimb = std.meta.Int(.unsigned, limb_info.bits / 2);
pub const SignedDoubleLimb = std.meta.Int(.signed, 2 * limb_info.bits);
pub const Log2Limb = std.math.Log2Int(Limb);
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index ec0143a3d7..87a62bf66c 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -2,6 +2,8 @@ const std = @import("../../std.zig");
const math = std.math;
const Limb = std.math.big.Limb;
const limb_bits = @typeInfo(Limb).Int.bits;
+const HalfLimb = std.math.big.HalfLimb;
+const half_limb_bits = @typeInfo(HalfLimb).Int.bits;
const DoubleLimb = std.math.big.DoubleLimb;
const SignedDoubleLimb = std.math.big.SignedDoubleLimb;
const Log2Limb = std.math.big.Log2Limb;
@@ -1335,7 +1337,16 @@ pub const Mutable = struct {
const xy_trailing = math.min(x_trailing, y_trailing);
if (y.len - xy_trailing == 1) {
- lldiv1(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], y.limbs[y.len - 1]);
+ const divisor = y.limbs[y.len - 1];
+
+ // Optimization for small divisor. By using a half limb we can avoid requiring DoubleLimb
+ // divisions in the hot code path. This may often require compiler_rt software-emulation.
+ if (divisor < maxInt(HalfLimb)) {
+ lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @intCast(HalfLimb, divisor));
+ } else {
+ lldiv1(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], divisor);
+ }
+
q.normalize(x.len - xy_trailing);
q.positive = q_positive;
@@ -1939,7 +1950,8 @@ pub const Const = struct {
}
} else {
// Non power-of-two: batch divisions per word size.
- const digits_per_limb = math.log(Limb, base, maxInt(Limb));
+ // We use a HalfLimb here so the division uses the faster lldiv0p5 over lldiv1 codepath.
+ const digits_per_limb = math.log(HalfLimb, base, maxInt(HalfLimb));
var limb_base: Limb = 1;
var j: usize = 0;
while (j < digits_per_limb) : (j += 1) {
@@ -3208,6 +3220,30 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
}
}
+fn lldiv0p5(quo: []Limb, rem: *Limb, a: []const Limb, b: HalfLimb) void {
+ @setRuntimeSafety(debug_safety);
+ assert(a.len > 1 or a[0] >= b);
+ assert(quo.len >= a.len);
+
+ rem.* = 0;
+ for (a) |_, ri| {
+ const i = a.len - ri - 1;
+ const ai_high = a[i] >> half_limb_bits;
+ const ai_low = a[i] & ((1 << half_limb_bits) - 1);
+
+ // Split the division into two divisions acting on half a limb each. Carry remainder.
+ const ai_high_with_carry = (rem.* << half_limb_bits) | ai_high;
+ const ai_high_quo = ai_high_with_carry / b;
+ rem.* = ai_high_with_carry % b;
+
+ const ai_low_with_carry = (rem.* << half_limb_bits) | ai_low;
+ const ai_low_quo = ai_low_with_carry / b;
+ rem.* = ai_low_with_carry % b;
+
+ quo[i] = (ai_high_quo << half_limb_bits) | ai_low_quo;
+ }
+}
+
fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 4c1d12116e..70a9b97a38 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -1064,7 +1064,7 @@ test "big.int mulWrap large" {
try testing.expect(b.eq(c));
}
-test "big.int div single-single no rem" {
+test "big.int div single-half no rem" {
var a = try Managed.initSet(testing.allocator, 50);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 5);
@@ -1080,7 +1080,7 @@ test "big.int div single-single no rem" {
try testing.expect((try r.to(u32)) == 0);
}
-test "big.int div single-single with rem" {
+test "big.int div single-half with rem" {
var a = try Managed.initSet(testing.allocator, 49);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 5);
@@ -1096,6 +1096,39 @@ test "big.int div single-single with rem" {
try testing.expect((try r.to(u32)) == 4);
}
+test "big.int div single-single no rem" {
+ // assumes usize is <= 64 bits.
+ var a = try Managed.initSet(testing.allocator, 1 << 52);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, 1 << 35);
+ defer b.deinit();
+
+ var q = try Managed.init(testing.allocator);
+ defer q.deinit();
+ var r = try Managed.init(testing.allocator);
+ defer r.deinit();
+ try Managed.divTrunc(&q, &r, a.toConst(), b.toConst());
+
+ try testing.expect((try q.to(u32)) == 131072);
+ try testing.expect((try r.to(u32)) == 0);
+}
+
+test "big.int div single-single with rem" {
+ var a = try Managed.initSet(testing.allocator, (1 << 52) | (1 << 33));
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, (1 << 35));
+ defer b.deinit();
+
+ var q = try Managed.init(testing.allocator);
+ defer q.deinit();
+ var r = try Managed.init(testing.allocator);
+ defer r.deinit();
+ try Managed.divTrunc(&q, &r, a.toConst(), b.toConst());
+
+ try testing.expect((try q.to(u64)) == 131072);
+ try testing.expect((try r.to(u64)) == 8589934592);
+}
+
test "big.int div multi-single no rem" {
const op1 = 0xffffeeeeddddcccc;
const op2 = 34;
From 495fd4ee3e81bd9d26768cdb8fc639afac79d9dc Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 6 Feb 2022 19:45:49 -0700
Subject: [PATCH 0056/2031] AstGen: refactor redundant expressions
This is a non-functional change.
---
src/AstGen.zig | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 4133d3d364..5f6d05b7f5 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2601,8 +2601,8 @@ fn varDecl(
var resolve_inferred_alloc: Zir.Inst.Ref = .none;
var opt_type_inst: Zir.Inst.Ref = .none;
- if (var_decl.ast.type_node != 0) {
- const type_inst = try typeExpr(gz, &init_scope.base, var_decl.ast.type_node);
+ if (type_node != 0) {
+ const type_inst = try typeExpr(gz, &init_scope.base, type_node);
opt_type_inst = type_inst;
if (align_inst == .none) {
init_scope.instructions_top = gz.instructions.items.len;
@@ -2683,7 +2683,7 @@ fn varDecl(
const src_inst = gz.instructions.items[src];
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) {
- if (var_decl.ast.type_node != 0) {
+ if (type_node != 0) {
zir_tags[src_inst] = .store;
} else {
zir_tags[src_inst] = .store_to_inferred_ptr;
From d4805472c3c98c488c17bce0ee28b6c44e93793c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 6 Feb 2022 20:06:00 -0700
Subject: [PATCH 0057/2031] compiler_rt: addXf3: add coercion to `@clz`
We're going to remove the first parameter from this function in the
future. Stage2 already ignores the first parameter. So we put an `@as`
in here to make it work for both.
---
lib/std/special/compiler_rt/addXf3.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/special/compiler_rt/addXf3.zig b/lib/std/special/compiler_rt/addXf3.zig
index 41ff00e95d..1339cc340d 100644
--- a/lib/std/special/compiler_rt/addXf3.zig
+++ b/lib/std/special/compiler_rt/addXf3.zig
@@ -339,7 +339,7 @@ pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (a_int < int_bit << 3) {
- const shift = @intCast(i32, @clz(u80, a_int)) - @intCast(i32, @clz(u80, int_bit << 3));
+ const shift = @intCast(i32, @clz(u80, a_int)) - @intCast(i32, @clz(u80, @as(u80, int_bit) << 3));
a_int <<= @intCast(u7, shift);
a_exp -= shift;
}
From 3bcce5f6d1f48e20dd177a7e440ddea1c451e779 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 6 Feb 2022 20:07:43 -0700
Subject: [PATCH 0058/2031] Sema: implement writing structs to memory at
comptime
---
src/value.zig | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/src/value.zig b/src/value.zig
index 18c8357b6b..ac0344bf34 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1033,6 +1033,11 @@ pub const Value = extern union {
}
pub fn writeToMemory(val: Value, ty: Type, target: Target, buffer: []u8) void {
+ if (val.isUndef()) {
+ const size = @intCast(usize, ty.abiSize(target));
+ std.mem.set(u8, buffer[0..size], 0xaa);
+ return;
+ }
switch (ty.zigTypeTag()) {
.Int => {
var bigint_buffer: BigIntSpace = undefined;
@@ -1068,6 +1073,14 @@ pub const Value = extern union {
buf_off += elem_size;
}
},
+ .Struct => {
+ const fields = ty.structFields().values();
+ const field_vals = val.castTag(.@"struct").?.data;
+ for (fields) |field, i| {
+ const off = @intCast(usize, ty.structFieldOffset(i, target));
+ writeToMemory(field_vals[i], field.ty, target, buffer[off..]);
+ }
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
@@ -1106,7 +1119,7 @@ pub const Value = extern union {
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
if (F == f80) {
- // TODO: use std.math.F80Repr
+ // TODO: use std.math.F80Repr?
const big_int = std.mem.readInt(u128, buffer[0..16], target.cpu.arch.endian());
const int = @truncate(u80, big_int);
return @bitCast(F, int);
From 65b6faa0485253b284f7a63601dc7d0f5858515a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 6 Feb 2022 20:23:40 -0700
Subject: [PATCH 0059/2031] Sema: avoid `@intToFloat` for f80 which breaks on
non-x86 targets
Currently Zig lowers `@intToFloat` for f80 incorrectly on non-x86
targets:
```
broken LLVM module found:
UIToFP result must be FP or FP vector
%62 = uitofp i64 %61 to i128
SIToFP result must be FP or FP vector
%66 = sitofp i64 %65 to i128
```
This happens because on such targets, we use i128 instead of x86_fp80 in
order to avoid "LLVM ERROR: Cannot select". `@intToFloat` must be
lowered differently to account for this difference as well.
---
src/stage1/codegen.cpp | 7 +++++--
src/value.zig | 22 +++++++++++++++++-----
2 files changed, 22 insertions(+), 7 deletions(-)
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index ec1454ce4f..02f84beeab 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -9432,11 +9432,14 @@ static void define_builtin_types(CodeGen *g) {
if (target_has_f80(g->zig_target)) {
entry->llvm_type = LLVMX86FP80Type();
} else {
+ // We use i128 here instead of x86_fp80 because on targets such as arm,
+ // LLVM will give "ERROR: Cannot select" for any instructions involving
+ // the x86_fp80 type.
entry->llvm_type = get_int_type(g, false, 128)->llvm_type;
}
entry->size_in_bits = 8 * 16;
- entry->abi_size = 16;
- entry->abi_align = 16;
+ entry->abi_size = 16; // matches LLVMABISizeOfType(LLVMX86FP80Type())
+ entry->abi_align = 16; // matches LLVMABIAlignmentOfType(LLVMX86FP80Type())
buf_init_from_str(&entry->name, "f80");
entry->data.floating.bit_count = 80;
diff --git a/src/value.zig b/src/value.zig
index ac0344bf34..6d551d9eba 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1120,8 +1120,8 @@ pub const Value = extern union {
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
if (F == f80) {
// TODO: use std.math.F80Repr?
- const big_int = std.mem.readInt(u128, buffer[0..16], target.cpu.arch.endian());
- const int = @truncate(u80, big_int);
+ const int = std.mem.readInt(u128, buffer[0..16], target.cpu.arch.endian());
+ // TODO shouldn't this be a bitcast from u80 to f80 instead of u128 to f80?
return @bitCast(F, int);
}
const Int = @Type(.{ .Int = .{
@@ -1143,8 +1143,18 @@ pub const Value = extern union {
.zero => 0,
.one => 1,
- .int_u64 => @intToFloat(T, val.castTag(.int_u64).?.data),
- .int_i64 => @intToFloat(T, val.castTag(.int_i64).?.data),
+ .int_u64 => {
+ if (T == f80) {
+ @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+ }
+ return @intToFloat(T, val.castTag(.int_u64).?.data);
+ },
+ .int_i64 => {
+ if (T == f80) {
+ @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+ }
+ return @intToFloat(T, val.castTag(.int_i64).?.data);
+ },
.int_big_positive => @floatCast(T, bigIntToFloat(val.castTag(.int_big_positive).?.data, true)),
.int_big_negative => @floatCast(T, bigIntToFloat(val.castTag(.int_big_negative).?.data, false)),
@@ -2202,7 +2212,9 @@ pub const Value = extern union {
16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)),
- 80 => return Value.Tag.float_80.create(arena, @intToFloat(f80, x)),
+ // We can't lower this properly on non-x86 llvm backends yet
+ //80 => return Value.Tag.float_80.create(arena, @intToFloat(f80, x)),
+ 80 => @panic("TODO f80 intToFloat"),
128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)),
else => unreachable,
}
From eb82fdf96c3392ec9eabacbaa98c976f9ccd264e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 6 Feb 2022 20:38:57 -0700
Subject: [PATCH 0060/2031] Sema: panic instead of lowering to unavailable
compiler-rt functions
Once the relevant compiler_rt functions are implemented, these panics
can be removed.
---
src/value.zig | 30 ++++++++++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/src/value.zig b/src/value.zig
index 6d551d9eba..1a7f51ecd5 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1423,8 +1423,10 @@ pub const Value = extern union {
.float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0,
.float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0,
.float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
- .float_80 => @rem(self.castTag(.float_80).?.data, 1) != 0,
- .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
+ //.float_80 => @rem(self.castTag(.float_80).?.data, 1) != 0,
+ .float_80 => @panic("TODO implement __remx in compiler-rt"),
+ //.float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
+ .float_128 => @panic("TODO implement fmodl in compiler-rt"),
else => unreachable,
};
@@ -2819,11 +2821,17 @@ pub const Value = extern union {
return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val));
},
80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt __remx");
+ }
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val));
},
128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt fmodl");
+ }
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val));
@@ -2850,11 +2858,17 @@ pub const Value = extern union {
return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val));
},
80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt __modx");
+ }
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val));
},
128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt fmodl");
+ }
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val));
@@ -3113,6 +3127,9 @@ pub const Value = extern union {
return Value.Tag.float_64.create(arena, lhs_val / rhs_val);
},
80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt __divxf3");
+ }
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, lhs_val / rhs_val);
@@ -3150,6 +3167,9 @@ pub const Value = extern union {
return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val));
},
80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt __floorx");
+ }
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val));
@@ -3187,6 +3207,9 @@ pub const Value = extern union {
return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val));
},
80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt __truncx");
+ }
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val));
@@ -3224,6 +3247,9 @@ pub const Value = extern union {
return Value.Tag.float_64.create(arena, lhs_val * rhs_val);
},
80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt __mulxf3");
+ }
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, lhs_val * rhs_val);
From 5065830aa007c374c382be9e80ba924df6cecc78 Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Thu, 3 Feb 2022 15:27:01 -0700
Subject: [PATCH 0061/2031] Avoid depending on child process execution when not
supported by host OS
In accordance with the requesting issue (#10750):
- `zig test` skips any tests that it cannot spawn, returning success
- `zig run` and `zig build` exit with failure, reporting the command that cannot be run
- `zig clang`, `zig ar`, etc. already punt directly to the appropriate clang/lld main(), even before this change
- Native `libc` detection is not supported
Additionally, `exec()` and related Builder functions error at run-time, reporting the command that cannot be run
---
build.zig | 7 ++
lib/std/build.zig | 38 +++++++++--
lib/std/build/RunStep.zig | 20 ++++++
lib/std/child_process.zig | 4 ++
lib/std/process.zig | 8 ++-
src/Compilation.zig | 107 ++++++++++++++++-------------
src/ThreadPool.zig | 3 +
src/libc_installation.zig | 4 +-
src/link/Coff.zig | 112 +++++++++++++++++--------------
src/link/Elf.zig | 112 +++++++++++++++++--------------
src/link/Wasm.zig | 112 +++++++++++++++++--------------
src/main.zig | 137 +++++++++++++++++++++++---------------
src/mingw.zig | 57 ++++++++--------
src/test.zig | 11 +++
test/tests.zig | 20 ++++++
15 files changed, 470 insertions(+), 282 deletions(-)
diff --git a/build.zig b/build.zig
index f2d154c702..d53d823868 100644
--- a/build.zig
+++ b/build.zig
@@ -229,6 +229,10 @@ pub fn build(b: *Builder) !void {
const version = if (opt_version_string) |version| version else v: {
const version_string = b.fmt("{d}.{d}.{d}", .{ zig_version.major, zig_version.minor, zig_version.patch });
+ if (!std.process.can_spawn) {
+ std.debug.print("error: version info cannot be retrieved from git. Zig version must be provided using -Dversion-string\n", .{});
+ std.process.exit(1);
+ }
var code: u8 = undefined;
const git_describe_untrimmed = b.execAllowFail(&[_][]const u8{
"git", "-C", b.build_root, "describe", "--match", "*.*.*", "--tags",
@@ -542,6 +546,9 @@ fn addCxxKnownPath(
errtxt: ?[]const u8,
need_cpp_includes: bool,
) !void {
+ if (!std.process.can_spawn)
+ return error.RequiredLibraryNotFound;
+
const path_padded = try b.exec(&[_][]const u8{
ctx.cxx_compiler,
b.fmt("-print-file-name={s}", .{objname}),
diff --git a/lib/std/build.zig b/lib/std/build.zig
index 05f36f5714..fcaa115ccb 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -88,7 +88,14 @@ pub const Builder = struct {
/// Information about the native target. Computed before build() is invoked.
host: NativeTargetInfo,
- const PkgConfigError = error{
+ pub const ExecError = error{
+ ReadFailure,
+ ExitCodeFailure,
+ ProcessTerminated,
+ ExecNotSupported,
+ } || std.ChildProcess.SpawnError;
+
+ pub const PkgConfigError = error{
PkgConfigCrashed,
PkgConfigFailed,
PkgConfigNotInstalled,
@@ -959,6 +966,9 @@ pub const Builder = struct {
printCmd(cwd, argv);
}
+ if (!std.process.can_spawn)
+ return error.ExecNotSupported;
+
const child = std.ChildProcess.init(argv, self.allocator) catch unreachable;
defer child.deinit();
@@ -1168,9 +1178,12 @@ pub const Builder = struct {
argv: []const []const u8,
out_code: *u8,
stderr_behavior: std.ChildProcess.StdIo,
- ) ![]u8 {
+ ) ExecError![]u8 {
assert(argv.len != 0);
+ if (!std.process.can_spawn)
+ return error.ExecNotSupported;
+
const max_output_size = 400 * 1024;
const child = try std.ChildProcess.init(argv, self.allocator);
defer child.deinit();
@@ -1182,7 +1195,9 @@ pub const Builder = struct {
try child.spawn();
- const stdout = try child.stdout.?.reader().readAllAlloc(self.allocator, max_output_size);
+ const stdout = child.stdout.?.reader().readAllAlloc(self.allocator, max_output_size) catch {
+ return error.ReadFailure;
+ };
errdefer self.allocator.free(stdout);
const term = try child.wait();
@@ -1208,8 +1223,21 @@ pub const Builder = struct {
printCmd(null, argv);
}
+ if (!std.process.can_spawn) {
+ if (src_step) |s| warn("{s}...", .{s.name});
+ warn("Unable to spawn the following command: cannot spawn child process\n", .{});
+ printCmd(null, argv);
+ std.os.abort();
+ }
+
var code: u8 = undefined;
return self.execAllowFail(argv, &code, .Inherit) catch |err| switch (err) {
+ error.ExecNotSupported => {
+ if (src_step) |s| warn("{s}...", .{s.name});
+ warn("Unable to spawn the following command: cannot spawn child process\n", .{});
+ printCmd(null, argv);
+ std.os.abort();
+ },
error.FileNotFound => {
if (src_step) |s| warn("{s}...", .{s.name});
warn("Unable to spawn the following command: file not found\n", .{});
@@ -1260,7 +1288,7 @@ pub const Builder = struct {
) catch unreachable;
}
- fn execPkgConfigList(self: *Builder, out_code: *u8) ![]const PkgConfigPkg {
+ fn execPkgConfigList(self: *Builder, out_code: *u8) (PkgConfigError || ExecError)![]const PkgConfigPkg {
const stdout = try self.execAllowFail(&[_][]const u8{ "pkg-config", "--list-all" }, out_code, .Ignore);
var list = ArrayList(PkgConfigPkg).init(self.allocator);
errdefer list.deinit();
@@ -1287,6 +1315,7 @@ pub const Builder = struct {
} else |err| {
const result = switch (err) {
error.ProcessTerminated => error.PkgConfigCrashed,
+ error.ExecNotSupported => error.PkgConfigFailed,
error.ExitCodeFailure => error.PkgConfigFailed,
error.FileNotFound => error.PkgConfigNotInstalled,
error.InvalidName => error.PkgConfigNotInstalled,
@@ -1929,6 +1958,7 @@ pub const LibExeObjStep = struct {
"--libs",
}, &code, .Ignore)) |stdout| stdout else |err| switch (err) {
error.ProcessTerminated => return error.PkgConfigCrashed,
+ error.ExecNotSupported => return error.PkgConfigFailed,
error.ExitCodeFailure => return error.PkgConfigFailed,
error.FileNotFound => return error.PkgConfigNotInstalled,
else => return err,
diff --git a/lib/std/build/RunStep.zig b/lib/std/build/RunStep.zig
index 4e18d5d738..6bd1bda952 100644
--- a/lib/std/build/RunStep.zig
+++ b/lib/std/build/RunStep.zig
@@ -10,6 +10,8 @@ const mem = std.mem;
const process = std.process;
const ArrayList = std.ArrayList;
const BufMap = std.BufMap;
+const Allocator = mem.Allocator;
+const ExecError = build.Builder.ExecError;
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
@@ -136,6 +138,17 @@ pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8
) catch unreachable;
}
+fn argvCmd(allocator: Allocator, argv: []const []const u8) ![]u8 {
+ var cmd = std.ArrayList(u8).init(allocator);
+ defer cmd.deinit();
+ for (argv[0 .. argv.len - 1]) |arg| {
+ try cmd.appendSlice(arg);
+ try cmd.append(' ');
+ }
+ try cmd.appendSlice(argv[argv.len - 1]);
+ return cmd.toOwnedSlice();
+}
+
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
}
@@ -175,6 +188,13 @@ fn make(step: *Step) !void {
const argv = argv_list.items;
+ if (!std.process.can_spawn) {
+ const cmd = try argvCmd(self.builder.allocator, argv);
+ std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
+ self.builder.allocator.free(cmd);
+ return ExecError.ExecNotSupported;
+ }
+
const child = std.ChildProcess.init(argv, self.builder.allocator) catch unreachable;
defer child.deinit();
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 7808dcd1e5..4cbe840bba 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -124,6 +124,10 @@ pub const ChildProcess = struct {
/// On success must call `kill` or `wait`.
pub fn spawn(self: *ChildProcess) SpawnError!void {
+ if (!std.process.can_spawn) {
+ @compileError("the target operating system cannot spawn processes");
+ }
+
if (builtin.os.tag == .windows) {
return self.spawnWindows();
} else {
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 699c994abf..c0f11b22ce 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -950,7 +950,13 @@ pub fn getSelfExeSharedLibPaths(allocator: Allocator) error{OutOfMemory}![][:0]u
/// Tells whether calling the `execv` or `execve` functions will be a compile error.
pub const can_execv = switch (builtin.os.tag) {
- .windows, .haiku => false,
+ .windows, .haiku, .wasi => false,
+ else => true,
+};
+
+/// Tells whether spawning child processes is supported (e.g. via ChildProcess)
+pub const can_spawn = switch (builtin.os.tag) {
+ .wasi => false,
else => true,
};
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 21a0c6fe58..f07a7c9dd7 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -25,6 +25,7 @@ const libunwind = @import("libunwind.zig");
const libcxx = @import("libcxx.zig");
const wasi_libc = @import("wasi_libc.zig");
const fatal = @import("main.zig").fatal;
+const clangMain = @import("main.zig").clangMain;
const Module = @import("Module.zig");
const Cache = @import("Cache.zig");
const stage1 = @import("stage1.zig");
@@ -3667,55 +3668,71 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
dump_argv(argv.items);
}
- const child = try std.ChildProcess.init(argv.items, arena);
- defer child.deinit();
+ if (std.process.can_spawn) {
+ const child = try std.ChildProcess.init(argv.items, arena);
+ defer child.deinit();
- if (comp.clang_passthrough_mode) {
- child.stdin_behavior = .Inherit;
- child.stdout_behavior = .Inherit;
- child.stderr_behavior = .Inherit;
+ if (comp.clang_passthrough_mode) {
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
- const term = child.spawnAndWait() catch |err| {
- return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- };
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- std.process.exit(code);
- }
- if (comp.clang_preprocessor_mode == .stdout)
- std.process.exit(0);
- },
- else => std.process.abort(),
+ const term = child.spawnAndWait() catch |err| {
+ return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ };
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ std.process.exit(code);
+ }
+ if (comp.clang_preprocessor_mode == .stdout)
+ std.process.exit(0);
+ },
+ else => std.process.abort(),
+ }
+ } else {
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Ignore;
+ child.stderr_behavior = .Pipe;
+
+ try child.spawn();
+
+ const stderr_reader = child.stderr.?.reader();
+
+ const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
+
+ const term = child.wait() catch |err| {
+ return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ };
+
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO parse clang stderr and turn it into an error message
+ // and then call failCObjWithOwnedErrorMsg
+ log.err("clang failed with stderr: {s}", .{stderr});
+ return comp.failCObj(c_object, "clang exited with code {d}", .{code});
+ }
+ },
+ else => {
+ log.err("clang terminated with stderr: {s}", .{stderr});
+ return comp.failCObj(c_object, "clang terminated unexpectedly", .{});
+ },
+ }
}
} else {
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Ignore;
- child.stderr_behavior = .Pipe;
-
- try child.spawn();
-
- const stderr_reader = child.stderr.?.reader();
-
- const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
-
- const term = child.wait() catch |err| {
- return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO parse clang stderr and turn it into an error message
- // and then call failCObjWithOwnedErrorMsg
- log.err("clang failed with stderr: {s}", .{stderr});
- return comp.failCObj(c_object, "clang exited with code {d}", .{code});
- }
- },
- else => {
- log.err("clang terminated with stderr: {s}", .{stderr});
- return comp.failCObj(c_object, "clang terminated unexpectedly", .{});
- },
+ const exit_code = try clangMain(arena, argv.items);
+ if (exit_code != 0) {
+ if (comp.clang_passthrough_mode) {
+ std.process.exit(exit_code);
+ } else {
+ return comp.failCObj(c_object, "clang exited with code {d}", .{exit_code});
+ }
+ }
+ if (comp.clang_passthrough_mode and
+ comp.clang_preprocessor_mode == .stdout)
+ {
+ std.process.exit(0);
}
}
diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig
index 4f9d8dc015..813d67db66 100644
--- a/src/ThreadPool.zig
+++ b/src/ThreadPool.zig
@@ -82,6 +82,9 @@ pub fn init(self: *ThreadPool, allocator: std.mem.Allocator) !void {
}
fn destroyWorkers(self: *ThreadPool, spawned: usize) void {
+ if (builtin.single_threaded)
+ return;
+
for (self.workers[0..spawned]) |*worker| {
worker.thread.join();
worker.idle_node.data.deinit();
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index 4cd43c7567..fe1a2b2ca5 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -216,7 +216,7 @@ pub const LibCInstallation = struct {
self.crt_dir = try args.allocator.dupeZ(u8, "/system/develop/lib");
break :blk batch.wait();
};
- } else {
+ } else if (std.process.can_spawn) {
try blk: {
var batch = Batch(FindError!void, 2, .auto_async).init();
errdefer batch.wait() catch {};
@@ -229,6 +229,8 @@ pub const LibCInstallation = struct {
}
break :blk batch.wait();
};
+ } else {
+ return error.LibCRuntimeNotFound;
}
return self;
}
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 894d5dd8f7..8426e5d50c 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -9,6 +9,7 @@ const fs = std.fs;
const allocPrint = std.fmt.allocPrint;
const mem = std.mem;
+const lldMain = @import("../main.zig").lldMain;
const trace = @import("../tracy.zig").trace;
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
@@ -1358,60 +1359,71 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
Compilation.dump_argv(argv.items[1..]);
}
- // Sadly, we must run LLD as a child process because it does not behave
- // properly as a library.
- const child = try std.ChildProcess.init(argv.items, arena);
- defer child.deinit();
+ if (std.process.can_spawn) {
+ // If possible, we run LLD as a child process because it does not always
+ // behave properly as a library, unfortunately.
+ // https://github.com/ziglang/zig/issues/3825
+ const child = try std.ChildProcess.init(argv.items, arena);
+ defer child.deinit();
- if (comp.clang_passthrough_mode) {
- child.stdin_behavior = .Inherit;
- child.stdout_behavior = .Inherit;
- child.stderr_behavior = .Inherit;
+ if (comp.clang_passthrough_mode) {
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
- const term = child.spawnAndWait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO https://github.com/ziglang/zig/issues/6342
- std.process.exit(1);
- }
- },
- else => std.process.abort(),
+ const term = child.spawnAndWait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ std.process.exit(code);
+ }
+ },
+ else => std.process.abort(),
+ }
+ } else {
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Ignore;
+ child.stderr_behavior = .Pipe;
+
+ try child.spawn();
+
+ const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
+
+ const term = child.wait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO parse this output and surface with the Compilation API rather than
+ // directly outputting to stderr here.
+ std.debug.print("{s}", .{stderr});
+ return error.LLDReportedFailure;
+ }
+ },
+ else => {
+ log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
+ return error.LLDCrashed;
+ },
+ }
+
+ if (stderr.len != 0) {
+ log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+ }
}
} else {
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Ignore;
- child.stderr_behavior = .Pipe;
-
- try child.spawn();
-
- const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
-
- const term = child.wait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO parse this output and surface with the Compilation API rather than
- // directly outputting to stderr here.
- std.debug.print("{s}", .{stderr});
- return error.LLDReportedFailure;
- }
- },
- else => {
- log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
- return error.LLDCrashed;
- },
- }
-
- if (stderr.len != 0) {
- log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+ const exit_code = try lldMain(arena, argv.items);
+ if (exit_code != 0) {
+ if (comp.clang_passthrough_mode) {
+ std.process.exit(exit_code);
+ } else {
+ return error.LLDReportedFailure;
+ }
}
}
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 37afd68c82..2a550d26e6 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -14,6 +14,7 @@ const leb128 = std.leb;
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen.zig");
+const lldMain = @import("../main.zig").lldMain;
const trace = @import("../tracy.zig").trace;
const Package = @import("../Package.zig");
const Value = @import("../value.zig").Value;
@@ -1950,60 +1951,71 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
Compilation.dump_argv(argv.items[1..]);
}
- // Sadly, we must run LLD as a child process because it does not behave
- // properly as a library.
- const child = try std.ChildProcess.init(argv.items, arena);
- defer child.deinit();
+ if (std.process.can_spawn) {
+ // If possible, we run LLD as a child process because it does not always
+ // behave properly as a library, unfortunately.
+ // https://github.com/ziglang/zig/issues/3825
+ const child = try std.ChildProcess.init(argv.items, arena);
+ defer child.deinit();
- if (comp.clang_passthrough_mode) {
- child.stdin_behavior = .Inherit;
- child.stdout_behavior = .Inherit;
- child.stderr_behavior = .Inherit;
+ if (comp.clang_passthrough_mode) {
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
- const term = child.spawnAndWait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO https://github.com/ziglang/zig/issues/6342
- std.process.exit(1);
- }
- },
- else => std.process.abort(),
+ const term = child.spawnAndWait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ std.process.exit(code);
+ }
+ },
+ else => std.process.abort(),
+ }
+ } else {
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Ignore;
+ child.stderr_behavior = .Pipe;
+
+ try child.spawn();
+
+ const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
+
+ const term = child.wait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO parse this output and surface with the Compilation API rather than
+ // directly outputting to stderr here.
+ std.debug.print("{s}", .{stderr});
+ return error.LLDReportedFailure;
+ }
+ },
+ else => {
+ log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
+ return error.LLDCrashed;
+ },
+ }
+
+ if (stderr.len != 0) {
+ log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+ }
}
} else {
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Ignore;
- child.stderr_behavior = .Pipe;
-
- try child.spawn();
-
- const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
-
- const term = child.wait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO parse this output and surface with the Compilation API rather than
- // directly outputting to stderr here.
- std.debug.print("{s}", .{stderr});
- return error.LLDReportedFailure;
- }
- },
- else => {
- log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
- return error.LLDCrashed;
- },
- }
-
- if (stderr.len != 0) {
- log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+ const exit_code = try lldMain(arena, argv.items);
+ if (exit_code != 0) {
+ if (comp.clang_passthrough_mode) {
+ std.process.exit(exit_code);
+ } else {
+ return error.LLDReportedFailure;
+ }
}
}
}
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index b047e4b68a..91952468cc 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -15,6 +15,7 @@ const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const CodeGen = @import("../arch/wasm/CodeGen.zig");
const link = @import("../link.zig");
+const lldMain = @import("../main.zig").lldMain;
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const wasi_libc = @import("../wasi_libc.zig");
@@ -1486,60 +1487,71 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
Compilation.dump_argv(argv.items[1..]);
}
- // Sadly, we must run LLD as a child process because it does not behave
- // properly as a library.
- const child = try std.ChildProcess.init(argv.items, arena);
- defer child.deinit();
+ if (std.process.can_spawn) {
+ // If possible, we run LLD as a child process because it does not always
+ // behave properly as a library, unfortunately.
+ // https://github.com/ziglang/zig/issues/3825
+ const child = try std.ChildProcess.init(argv.items, arena);
+ defer child.deinit();
- if (comp.clang_passthrough_mode) {
- child.stdin_behavior = .Inherit;
- child.stdout_behavior = .Inherit;
- child.stderr_behavior = .Inherit;
+ if (comp.clang_passthrough_mode) {
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
- const term = child.spawnAndWait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO https://github.com/ziglang/zig/issues/6342
- std.process.exit(1);
- }
- },
- else => std.process.abort(),
+ const term = child.spawnAndWait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ std.process.exit(code);
+ }
+ },
+ else => std.process.abort(),
+ }
+ } else {
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Ignore;
+ child.stderr_behavior = .Pipe;
+
+ try child.spawn();
+
+ const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
+
+ const term = child.wait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO parse this output and surface with the Compilation API rather than
+ // directly outputting to stderr here.
+ std.debug.print("{s}", .{stderr});
+ return error.LLDReportedFailure;
+ }
+ },
+ else => {
+ log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
+ return error.LLDCrashed;
+ },
+ }
+
+ if (stderr.len != 0) {
+ log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+ }
}
} else {
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Ignore;
- child.stderr_behavior = .Pipe;
-
- try child.spawn();
-
- const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
-
- const term = child.wait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO parse this output and surface with the Compilation API rather than
- // directly outputting to stderr here.
- std.debug.print("{s}", .{stderr});
- return error.LLDReportedFailure;
- }
- },
- else => {
- log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
- return error.LLDCrashed;
- },
- }
-
- if (stderr.len != 0) {
- log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+ const exit_code = try lldMain(arena, argv.items);
+ if (exit_code != 0) {
+ if (comp.clang_passthrough_mode) {
+ std.process.exit(exit_code);
+ } else {
+ return error.LLDReportedFailure;
+ }
}
}
}
diff --git a/src/main.zig b/src/main.zig
index bbdb948c90..d6688081f1 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -221,7 +221,7 @@ pub fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
mem.eql(u8, cmd, "lib") or
mem.eql(u8, cmd, "ar"))
{
- return punt_to_llvm_ar(arena, args);
+ return process.exit(try llvmArMain(arena, args));
} else if (mem.eql(u8, cmd, "cc")) {
return buildOutputType(gpa, arena, args, .cc);
} else if (mem.eql(u8, cmd, "c++")) {
@@ -231,12 +231,12 @@ pub fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
} else if (mem.eql(u8, cmd, "clang") or
mem.eql(u8, cmd, "-cc1") or mem.eql(u8, cmd, "-cc1as"))
{
- return punt_to_clang(arena, args);
+ return process.exit(try clangMain(arena, args));
} else if (mem.eql(u8, cmd, "ld.lld") or
mem.eql(u8, cmd, "lld-link") or
mem.eql(u8, cmd, "wasm-ld"))
{
- return punt_to_lld(arena, args);
+ return process.exit(try lldMain(arena, args));
} else if (mem.eql(u8, cmd, "build")) {
return cmdBuild(gpa, arena, cmd_args);
} else if (mem.eql(u8, cmd, "fmt")) {
@@ -1347,7 +1347,7 @@ fn buildOutputType(
.ignore => {},
.driver_punt => {
// Never mind what we're doing, just pass the args directly. For example --help.
- return punt_to_clang(arena, all_args);
+ return process.exit(try clangMain(arena, all_args));
},
.pic => want_pic = true,
.no_pic => want_pic = false,
@@ -1866,7 +1866,7 @@ fn buildOutputType(
// An error message is generated when there is more than 1 C source file.
if (c_source_files.items.len != 1) {
// For example `zig cc` and no args should print the "no input files" message.
- return punt_to_clang(arena, all_args);
+ return process.exit(try clangMain(arena, all_args));
}
if (out_path) |p| {
emit_bin = .{ .yes = p };
@@ -1882,7 +1882,7 @@ fn buildOutputType(
{
// For example `zig cc` and no args should print the "no input files" message.
// There could be other reasons to punt to clang, for example, --help.
- return punt_to_clang(arena, all_args);
+ return process.exit(try clangMain(arena, all_args));
}
},
}
@@ -2883,7 +2883,7 @@ fn runOrTest(
try warnAboutForeignBinaries(gpa, arena, arg_mode, target_info, link_libc);
const cmd = try argvCmd(arena, argv.items);
fatal("the following command failed to execve with '{s}':\n{s}", .{ @errorName(err), cmd });
- } else {
+ } else if (std.process.can_spawn) {
const child = try std.ChildProcess.init(argv.items, gpa);
defer child.deinit();
@@ -2943,6 +2943,9 @@ fn runOrTest(
},
else => unreachable,
}
+ } else {
+ const cmd = try argvCmd(arena, argv.items);
+ fatal("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
}
}
@@ -3553,29 +3556,35 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
break :argv child_argv.items;
};
- const child = try std.ChildProcess.init(child_argv, gpa);
- defer child.deinit();
- child.stdin_behavior = .Inherit;
- child.stdout_behavior = .Inherit;
- child.stderr_behavior = .Inherit;
+ if (std.process.can_spawn) {
+ const child = try std.ChildProcess.init(child_argv, gpa);
+ defer child.deinit();
- const term = try child.spawnAndWait();
- switch (term) {
- .Exited => |code| {
- if (code == 0) return cleanExit();
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
- if (prominent_compile_errors) {
- fatal("the build command failed with exit code {d}", .{code});
- } else {
+ const term = try child.spawnAndWait();
+ switch (term) {
+ .Exited => |code| {
+ if (code == 0) return cleanExit();
+
+ if (prominent_compile_errors) {
+ fatal("the build command failed with exit code {d}", .{code});
+ } else {
+ const cmd = try argvCmd(arena, child_argv);
+ fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
+ }
+ },
+ else => {
const cmd = try argvCmd(arena, child_argv);
- fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
- }
- },
- else => {
- const cmd = try argvCmd(arena, child_argv);
- fatal("the following build command crashed:\n{s}", .{cmd});
- },
+ fatal("the following build command crashed:\n{s}", .{cmd});
+ },
+ }
+ } else {
+ const cmd = try argvCmd(arena, child_argv);
+ fatal("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
}
}
@@ -4080,51 +4089,69 @@ pub const info_zen =
extern "c" fn ZigClang_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
extern "c" fn ZigLlvmAr_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
-/// TODO https://github.com/ziglang/zig/issues/3257
-fn punt_to_clang(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
- if (!build_options.have_llvm)
- fatal("`zig cc` and `zig c++` unavailable: compiler built without LLVM extensions", .{});
- // Convert the args to the format Clang expects.
- const argv = try arena.alloc(?[*:0]u8, args.len + 1);
+fn argsCopyZ(alloc: Allocator, args: []const []const u8) ![:null]?[*:0]u8 {
+ var argv = try alloc.allocSentinel(?[*:0]u8, args.len, null);
for (args) |arg, i| {
- argv[i] = try arena.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation.
+ argv[i] = try alloc.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation.
}
- argv[args.len] = null;
- const exit_code = ZigClang_main(@intCast(c_int, args.len), argv[0..args.len :null].ptr);
- process.exit(@bitCast(u8, @truncate(i8, exit_code)));
+ return argv;
}
-/// TODO https://github.com/ziglang/zig/issues/3257
-fn punt_to_llvm_ar(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
+pub fn clangMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8 {
+ if (!build_options.have_llvm)
+ fatal("`zig cc` and `zig c++` unavailable: compiler built without LLVM extensions", .{});
+
+ var arena_instance = std.heap.ArenaAllocator.init(alloc);
+ defer arena_instance.deinit();
+ const arena = arena_instance.allocator();
+
+ // Convert the args to the null-terminated format Clang expects.
+ const argv = try argsCopyZ(arena, args);
+ const exit_code = ZigClang_main(@intCast(c_int, argv.len), argv.ptr);
+ return @bitCast(u8, @truncate(i8, exit_code));
+}
+
+pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8 {
if (!build_options.have_llvm)
fatal("`zig ar`, `zig dlltool`, `zig ranlib', and `zig lib` unavailable: compiler built without LLVM extensions", .{});
+ var arena_instance = std.heap.ArenaAllocator.init(alloc);
+ defer arena_instance.deinit();
+ const arena = arena_instance.allocator();
+
// Convert the args to the format llvm-ar expects.
- // We subtract 1 to shave off the zig binary from args[0].
- const argv = try arena.allocSentinel(?[*:0]u8, args.len - 1, null);
- for (args[1..]) |arg, i| {
- // TODO If there was an argsAllocZ we could avoid this allocation.
- argv[i] = try arena.dupeZ(u8, arg);
- }
- const argc = @intCast(c_int, argv.len);
- const exit_code = ZigLlvmAr_main(argc, argv.ptr);
- process.exit(@bitCast(u8, @truncate(i8, exit_code)));
+ // We intentionally shave off the zig binary at args[0].
+ const argv = try argsCopyZ(arena, args[1..]);
+ const exit_code = ZigLlvmAr_main(@intCast(c_int, argv.len), argv.ptr);
+ return @bitCast(u8, @truncate(i8, exit_code));
}
/// The first argument determines which backend is invoked. The options are:
/// * `ld.lld` - ELF
/// * `lld-link` - COFF
/// * `wasm-ld` - WebAssembly
-/// TODO https://github.com/ziglang/zig/issues/3257
-pub fn punt_to_lld(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
+pub fn lldMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8 {
if (!build_options.have_llvm)
fatal("`zig {s}` unavailable: compiler built without LLVM extensions", .{args[0]});
- // Convert the args to the format LLD expects.
- // We subtract 1 to shave off the zig binary from args[0].
- const argv = try arena.allocSentinel(?[*:0]const u8, args.len - 1, null);
- for (args[1..]) |arg, i| {
- argv[i] = try arena.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation.
+
+ // Print a warning if lld is called multiple times in the same process,
+ // since it may misbehave
+ // https://github.com/ziglang/zig/issues/3825
+ const CallCounter = struct {
+ var count: usize = 0;
+ };
+ if (CallCounter.count == 1) { // Issue the warning on the first repeat call
+ warn("calling lldMain repeatedly within the same process can have side effects (https://github.com/ziglang/zig/issues/3825)", .{});
}
+ CallCounter.count += 1;
+
+ var arena_instance = std.heap.ArenaAllocator.init(alloc);
+ defer arena_instance.deinit();
+ const arena = arena_instance.allocator();
+
+    // Convert the args to the format LLD expects.
+ // We intentionally shave off the zig binary at args[0].
+ const argv = try argsCopyZ(arena, args[1..]);
const exit_code = rc: {
const llvm = @import("codegen/llvm/bindings.zig");
const argc = @intCast(c_int, argv.len);
@@ -4138,7 +4165,7 @@ pub fn punt_to_lld(arena: Allocator, args: []const []const u8) error{OutOfMemory
unreachable;
}
};
- process.exit(@bitCast(u8, @truncate(i8, exit_code)));
+ return @bitCast(u8, @truncate(i8, exit_code));
}
const clang_args = @import("clang_options.zig").list;
diff --git a/src/mingw.zig b/src/mingw.zig
index f555634459..84ec0795f1 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -5,6 +5,7 @@ const path = std.fs.path;
const assert = std.debug.assert;
const log = std.log.scoped(.mingw);
+const builtin = @import("builtin");
const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
@@ -367,39 +368,43 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
Compilation.dump_argv(&args);
}
- const child = try std.ChildProcess.init(&args, arena);
- defer child.deinit();
+ if (std.process.can_spawn) {
+ const child = try std.ChildProcess.init(&args, arena);
+ defer child.deinit();
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Pipe;
- child.stderr_behavior = .Pipe;
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Pipe;
+ child.stderr_behavior = .Pipe;
- try child.spawn();
+ try child.spawn();
- const stderr_reader = child.stderr.?.reader();
+ const stderr_reader = child.stderr.?.reader();
- // TODO https://github.com/ziglang/zig/issues/6343
- const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
+ // TODO https://github.com/ziglang/zig/issues/6343
+ const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
- const term = child.wait() catch |err| {
- // TODO surface a proper error here
- log.err("unable to spawn {s}: {s}", .{ args[0], @errorName(err) });
- return error.ClangPreprocessorFailed;
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO surface a proper error here
- log.err("clang exited with code {d} and stderr: {s}", .{ code, stderr });
- return error.ClangPreprocessorFailed;
- }
- },
- else => {
+ const term = child.wait() catch |err| {
// TODO surface a proper error here
- log.err("clang terminated unexpectedly with stderr: {s}", .{stderr});
+ log.err("unable to spawn {s}: {s}", .{ args[0], @errorName(err) });
return error.ClangPreprocessorFailed;
- },
+ };
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO surface a proper error here
+ log.err("clang exited with code {d} and stderr: {s}", .{ code, stderr });
+ return error.ClangPreprocessorFailed;
+ }
+ },
+ else => {
+ // TODO surface a proper error here
+ log.err("clang terminated unexpectedly with stderr: {s}", .{stderr});
+ return error.ClangPreprocessorFailed;
+ },
+ }
+ } else {
+ log.err("unable to spawn {s}: spawning child process not supported on {s}", .{ args[0], @tagName(builtin.os.tag) });
+ return error.ClangPreprocessorFailed;
}
const lib_final_path = try comp.global_cache_directory.join(comp.gpa, &[_][]const u8{
diff --git a/src/test.zig b/src/test.zig
index e02ea04f1c..b73e11d7f5 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -730,6 +730,12 @@ pub const TestContext = struct {
// * cannot handle updates
// because of this we must spawn a child process rather than
// using Compilation directly.
+
+ if (!std.process.can_spawn) {
+ print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)});
+ return; // Pass test.
+ }
+
assert(case.updates.items.len == 1);
const update = case.updates.items[0];
try tmp.dir.writeFile(tmp_src_path, update.src);
@@ -1104,6 +1110,11 @@ pub const TestContext = struct {
}
},
.Execution => |expected_stdout| {
+ if (!std.process.can_spawn) {
+ print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)});
+ return; // Pass test.
+ }
+
update_node.setEstimatedTotalItems(4);
var argv = std.ArrayList([]const u8).init(allocator);
diff --git a/test/tests.zig b/test/tests.zig
index 70387b8d47..bd6c82cbb7 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -10,6 +10,8 @@ const fmt = std.fmt;
const ArrayList = std.ArrayList;
const Mode = std.builtin.Mode;
const LibExeObjStep = build.LibExeObjStep;
+const Allocator = mem.Allocator;
+const ExecError = build.Builder.ExecError;
// Cases
const compare_output = @import("compare_output.zig");
@@ -26,6 +28,17 @@ pub const TranslateCContext = @import("src/translate_c.zig").TranslateCContext;
pub const RunTranslatedCContext = @import("src/run_translated_c.zig").RunTranslatedCContext;
pub const CompareOutputContext = @import("src/compare_output.zig").CompareOutputContext;
+fn argvCmd(allocator: Allocator, argv: []const []const u8) ![]u8 {
+ var cmd = std.ArrayList(u8).init(allocator);
+ defer cmd.deinit();
+ for (argv[0 .. argv.len - 1]) |arg| {
+ try cmd.appendSlice(arg);
+ try cmd.append(' ');
+ }
+ try cmd.appendSlice(argv[argv.len - 1]);
+ return cmd.toOwnedSlice();
+}
+
const TestTarget = struct {
target: CrossTarget = @as(CrossTarget, .{}),
mode: std.builtin.Mode = .Debug,
@@ -722,6 +735,13 @@ pub const StackTracesContext = struct {
std.debug.print("Test {d}/{d} {s}...", .{ self.test_index + 1, self.context.test_index, self.name });
+ if (!std.process.can_spawn) {
+ const cmd = try argvCmd(b.allocator, args.items);
+ std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
+ b.allocator.free(cmd);
+ return ExecError.ExecNotSupported;
+ }
+
const child = std.ChildProcess.init(args.items, b.allocator) catch unreachable;
defer child.deinit();
From c1cf158729f4d726639a5695754957f9f45f89da Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Sat, 5 Feb 2022 09:09:55 -0700
Subject: [PATCH 0062/2031] Replace argvCmd with std.mem.join
---
lib/std/build/RunStep.zig | 13 +------------
src/main.zig | 27 ++++++++-------------------
test/tests.zig | 13 +------------
3 files changed, 10 insertions(+), 43 deletions(-)
diff --git a/lib/std/build/RunStep.zig b/lib/std/build/RunStep.zig
index 6bd1bda952..e8544921d9 100644
--- a/lib/std/build/RunStep.zig
+++ b/lib/std/build/RunStep.zig
@@ -138,17 +138,6 @@ pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8
) catch unreachable;
}
-fn argvCmd(allocator: Allocator, argv: []const []const u8) ![]u8 {
- var cmd = std.ArrayList(u8).init(allocator);
- defer cmd.deinit();
- for (argv[0 .. argv.len - 1]) |arg| {
- try cmd.appendSlice(arg);
- try cmd.append(' ');
- }
- try cmd.appendSlice(argv[argv.len - 1]);
- return cmd.toOwnedSlice();
-}
-
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
}
@@ -189,7 +178,7 @@ fn make(step: *Step) !void {
const argv = argv_list.items;
if (!std.process.can_spawn) {
- const cmd = try argvCmd(self.builder.allocator, argv);
+ const cmd = try std.mem.join(self.builder.allocator, " ", argv);
std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
self.builder.allocator.free(cmd);
return ExecError.ExecNotSupported;
diff --git a/src/main.zig b/src/main.zig
index d6688081f1..6a1416cd6b 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -2881,7 +2881,7 @@ fn runOrTest(
// execv releases the locks; no need to destroy the Compilation here.
const err = std.process.execv(gpa, argv.items);
try warnAboutForeignBinaries(gpa, arena, arg_mode, target_info, link_libc);
- const cmd = try argvCmd(arena, argv.items);
+ const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following command failed to execve with '{s}':\n{s}", .{ @errorName(err), cmd });
} else if (std.process.can_spawn) {
const child = try std.ChildProcess.init(argv.items, gpa);
@@ -2900,7 +2900,7 @@ fn runOrTest(
const term = child.spawnAndWait() catch |err| {
try warnAboutForeignBinaries(gpa, arena, arg_mode, target_info, link_libc);
- const cmd = try argvCmd(arena, argv.items);
+ const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following command failed with '{s}':\n{s}", .{ @errorName(err), cmd });
};
switch (arg_mode) {
@@ -2931,12 +2931,12 @@ fn runOrTest(
if (code == 0) {
if (!watch) return cleanExit();
} else {
- const cmd = try argvCmd(arena, argv.items);
+ const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following test command failed with exit code {d}:\n{s}", .{ code, cmd });
}
},
else => {
- const cmd = try argvCmd(arena, argv.items);
+ const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following test command crashed:\n{s}", .{cmd});
},
}
@@ -2944,7 +2944,7 @@ fn runOrTest(
else => unreachable,
}
} else {
- const cmd = try argvCmd(arena, argv.items);
+ const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
}
}
@@ -3573,32 +3573,21 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
if (prominent_compile_errors) {
fatal("the build command failed with exit code {d}", .{code});
} else {
- const cmd = try argvCmd(arena, child_argv);
+ const cmd = try std.mem.join(arena, " ", child_argv);
fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
}
},
else => {
- const cmd = try argvCmd(arena, child_argv);
+ const cmd = try std.mem.join(arena, " ", child_argv);
fatal("the following build command crashed:\n{s}", .{cmd});
},
}
} else {
- const cmd = try argvCmd(arena, child_argv);
+ const cmd = try std.mem.join(arena, " ", child_argv);
fatal("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
}
}
-fn argvCmd(allocator: Allocator, argv: []const []const u8) ![]u8 {
- var cmd = std.ArrayList(u8).init(allocator);
- defer cmd.deinit();
- for (argv[0 .. argv.len - 1]) |arg| {
- try cmd.appendSlice(arg);
- try cmd.append(' ');
- }
- try cmd.appendSlice(argv[argv.len - 1]);
- return cmd.toOwnedSlice();
-}
-
fn readSourceFileToEndAlloc(
allocator: mem.Allocator,
input: *const fs.File,
diff --git a/test/tests.zig b/test/tests.zig
index bd6c82cbb7..5b15da2bcb 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -28,17 +28,6 @@ pub const TranslateCContext = @import("src/translate_c.zig").TranslateCContext;
pub const RunTranslatedCContext = @import("src/run_translated_c.zig").RunTranslatedCContext;
pub const CompareOutputContext = @import("src/compare_output.zig").CompareOutputContext;
-fn argvCmd(allocator: Allocator, argv: []const []const u8) ![]u8 {
- var cmd = std.ArrayList(u8).init(allocator);
- defer cmd.deinit();
- for (argv[0 .. argv.len - 1]) |arg| {
- try cmd.appendSlice(arg);
- try cmd.append(' ');
- }
- try cmd.appendSlice(argv[argv.len - 1]);
- return cmd.toOwnedSlice();
-}
-
const TestTarget = struct {
target: CrossTarget = @as(CrossTarget, .{}),
mode: std.builtin.Mode = .Debug,
@@ -736,7 +725,7 @@ pub const StackTracesContext = struct {
std.debug.print("Test {d}/{d} {s}...", .{ self.test_index + 1, self.context.test_index, self.name });
if (!std.process.can_spawn) {
- const cmd = try argvCmd(b.allocator, args.items);
+ const cmd = try std.mem.join(b.allocator, " ", args.items);
std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
b.allocator.free(cmd);
return ExecError.ExecNotSupported;
From 33fa29601921d88097a1ee3c0d92b93047a5186d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 6 Feb 2022 22:29:40 -0700
Subject: [PATCH 0063/2031] stage2: pass proper can_exit_early value to LLD
and adjust the warning message for invoking LLD twice in the same
process.
---
src/link/Coff.zig | 2 +-
src/link/Elf.zig | 2 +-
src/link/Wasm.zig | 2 +-
src/main.zig | 16 ++++++++++------
4 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 8426e5d50c..bc5837fe47 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1417,7 +1417,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
}
}
} else {
- const exit_code = try lldMain(arena, argv.items);
+ const exit_code = try lldMain(arena, argv.items, false);
if (exit_code != 0) {
if (comp.clang_passthrough_mode) {
std.process.exit(exit_code);
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 2a550d26e6..afaf41a2f9 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2009,7 +2009,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
}
}
} else {
- const exit_code = try lldMain(arena, argv.items);
+ const exit_code = try lldMain(arena, argv.items, false);
if (exit_code != 0) {
if (comp.clang_passthrough_mode) {
std.process.exit(exit_code);
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 91952468cc..e6988e9232 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -1545,7 +1545,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
}
}
} else {
- const exit_code = try lldMain(arena, argv.items);
+ const exit_code = try lldMain(arena, argv.items, false);
if (exit_code != 0) {
if (comp.clang_passthrough_mode) {
std.process.exit(exit_code);
diff --git a/src/main.zig b/src/main.zig
index 6a1416cd6b..3f38fd1f78 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -236,7 +236,7 @@ pub fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
mem.eql(u8, cmd, "lld-link") or
mem.eql(u8, cmd, "wasm-ld"))
{
- return process.exit(try lldMain(arena, args));
+ return process.exit(try lldMain(arena, args, true));
} else if (mem.eql(u8, cmd, "build")) {
return cmdBuild(gpa, arena, cmd_args);
} else if (mem.eql(u8, cmd, "fmt")) {
@@ -4119,7 +4119,11 @@ pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}
/// * `ld.lld` - ELF
/// * `lld-link` - COFF
/// * `wasm-ld` - WebAssembly
-pub fn lldMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8 {
+pub fn lldMain(
+ alloc: Allocator,
+ args: []const []const u8,
+ can_exit_early: bool,
+) error{OutOfMemory}!u8 {
if (!build_options.have_llvm)
fatal("`zig {s}` unavailable: compiler built without LLVM extensions", .{args[0]});
@@ -4130,7 +4134,7 @@ pub fn lldMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8
var count: usize = 0;
};
if (CallCounter.count == 1) { // Issue the warning on the first repeat call
- warn("calling lldMain repeatedly within the same process can have side effects (https://github.com/ziglang/zig/issues/3825)", .{});
+ warn("invoking LLD for the second time within the same process because the host OS ({s}) does not support spawning child processes. This sometimes activates LLD bugs", .{@tagName(builtin.os.tag)});
}
CallCounter.count += 1;
@@ -4145,11 +4149,11 @@ pub fn lldMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8
const llvm = @import("codegen/llvm/bindings.zig");
const argc = @intCast(c_int, argv.len);
if (mem.eql(u8, args[1], "ld.lld")) {
- break :rc llvm.LinkELF(argc, argv.ptr, true);
+ break :rc llvm.LinkELF(argc, argv.ptr, can_exit_early);
} else if (mem.eql(u8, args[1], "lld-link")) {
- break :rc llvm.LinkCOFF(argc, argv.ptr, true);
+ break :rc llvm.LinkCOFF(argc, argv.ptr, can_exit_early);
} else if (mem.eql(u8, args[1], "wasm-ld")) {
- break :rc llvm.LinkWasm(argc, argv.ptr, true);
+ break :rc llvm.LinkWasm(argc, argv.ptr, can_exit_early);
} else {
unreachable;
}
From 5944e89016219138f6d5d9c818c7ce323eb64c1d Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 5 Feb 2022 15:55:17 +0100
Subject: [PATCH 0064/2031] stage2: lower unnamed constants in Elf and MachO
* link: add a virtual function `lowerUnnamedConsts`, similar to
`updateFunc` or `updateDecl` which needs to be implemented by the
linker backend in order to be used with the `CodeGen` code
* elf: implement `lowerUnnamedConsts` specialization where we
lower unnamed constants to `.rodata` section. We keep track of the
atoms encompassing the lowered unnamed consts in a global table
indexed by parent `Decl`. When the `Decl` is updated or destroyed,
we clear the unnamed consts referenced within the `Decl`.
* macho: implement `lowerUnnamedConsts` specialization where we
lower unnamed constants to `__TEXT,__const` section. We keep track of the
atoms encompassing the lowered unnamed consts in a global table
indexed by parent `Decl`. When the `Decl` is updated or destroyed,
we clear the unnamed consts referenced within the `Decl`.
* x64: change `MCValue.linker_sym_index` into two `MCValue`s: `.got_load` and
`.direct_load`. The former signifies to the emitter that it should
emit a GOT load relocation, while the latter that it should emit
a direct load (`SIGNED`) relocation.
* x64: lower `struct` instantiations
---
src/arch/x86_64/CodeGen.zig | 91 ++++++---
src/arch/x86_64/Emit.zig | 71 ++++---
src/arch/x86_64/Mir.zig | 17 +-
src/arch/x86_64/PrintMir.zig | 38 ++--
src/link.zig | 20 ++
src/link/Coff.zig | 9 +
src/link/Elf.zig | 189 +++++++++++++++++--
src/link/MachO.zig | 351 +++++++++++++++++++++++++++--------
src/link/MachO/Atom.zig | 88 ++-------
src/link/Plan9.zig | 9 +
test/behavior/align.zig | 1 +
test/behavior/cast.zig | 23 ++-
test/behavior/struct.zig | 21 +++
test/stage2/x86_64.zig | 88 +++++++++
14 files changed, 770 insertions(+), 246 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e5c7c99501..535e65000a 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -118,10 +118,14 @@ pub const MCValue = union(enum) {
/// The value is in memory at a hard-coded address.
/// If the type is a pointer, it means the pointer address is at this memory location.
memory: u64,
- /// The value is in memory but not allocated an address yet by the linker, so we store
- /// the symbol index instead.
- /// If the type is a pointer, it means the pointer is the symbol.
- linker_sym_index: u32,
+ /// The value is in memory referenced indirectly via a GOT entry index.
+ /// If the type is a pointer, it means the pointer is referenced indirectly via GOT.
+ /// When lowered, linker will emit a relocation of type X86_64_RELOC_GOT.
+ got_load: u32,
+ /// The value is in memory referenced directly via symbol index.
+ /// If the type is a pointer, it means the pointer is referenced directly via symbol index.
+ /// When lowered, linker will emit a relocation of type X86_64_RELOC_SIGNED.
+ direct_load: u32,
/// The value is one of the stack variables.
/// If the type is a pointer, it means the pointer address is in the stack at this offset.
stack_offset: i32,
@@ -1691,7 +1695,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
},
.memory,
- .linker_sym_index,
+ .got_load,
+ .direct_load,
=> {
const reg = try self.copyToTmpRegister(ptr_ty, ptr);
try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
@@ -1823,7 +1828,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
},
}
},
- .linker_sym_index,
+ .got_load,
+ .direct_load,
.memory,
=> {
value.freezeIfRegister(&self.register_manager);
@@ -1831,15 +1837,22 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const addr_reg: Register = blk: {
switch (ptr) {
- .linker_sym_index => |sym_index| {
+ .got_load,
+ .direct_load,
+ => |sym_index| {
+ const flags: u2 = switch (ptr) {
+ .got_load => 0b00,
+ .direct_load => 0b01,
+ else => unreachable,
+ };
const addr_reg = try self.register_manager.allocReg(null);
_ = try self.addInst(.{
- .tag = .lea,
+ .tag = .lea_pie,
.ops = (Mir.Ops{
.reg1 = addr_reg.to64(),
- .flags = 0b10,
+ .flags = flags,
}).encode(),
- .data = .{ .got_entry = sym_index },
+ .data = .{ .linker_sym_index = sym_index },
});
break :blk addr_reg;
},
@@ -2160,7 +2173,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
.embedded_in_code, .memory => {
return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
},
- .linker_sym_index => {
+ .got_load, .direct_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
},
.stack_offset => |off| {
@@ -2247,7 +2260,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
.embedded_in_code, .memory, .stack_offset => {
return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
},
- .linker_sym_index => {
+ .got_load, .direct_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
},
.compare_flags_unsigned => {
@@ -2261,7 +2274,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
.embedded_in_code, .memory => {
return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{});
},
- .linker_sym_index => {
+ .got_load, .direct_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP destination symbol at index", .{});
},
}
@@ -2317,7 +2330,7 @@ fn genIMulOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !
.embedded_in_code, .memory, .stack_offset => {
return self.fail("TODO implement x86 multiply source memory", .{});
},
- .linker_sym_index => {
+ .got_load, .direct_load => {
return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
},
.compare_flags_unsigned => {
@@ -2358,7 +2371,7 @@ fn genIMulOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !
.embedded_in_code, .memory, .stack_offset => {
return self.fail("TODO implement x86 multiply source memory", .{});
},
- .linker_sym_index => {
+ .got_load, .direct_load => {
return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
},
.compare_flags_unsigned => {
@@ -2372,7 +2385,7 @@ fn genIMulOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !
.embedded_in_code, .memory => {
return self.fail("TODO implement x86 multiply destination memory", .{});
},
- .linker_sym_index => {
+ .got_load, .direct_load => {
return self.fail("TODO implement x86 multiply destination symbol at index in linker", .{});
},
}
@@ -2478,7 +2491,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
.dead => unreachable,
.embedded_in_code => unreachable,
.memory => unreachable,
- .linker_sym_index => unreachable,
+ .got_load => unreachable,
+ .direct_load => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
}
@@ -2540,7 +2554,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
try self.genSetReg(Type.initTag(.usize), .rax, .{
- .linker_sym_index = func.owner_decl.link.macho.local_sym_index,
+ .got_load = func.owner_decl.link.macho.local_sym_index,
});
// callq *%rax
_ = try self.addInst(.{
@@ -3576,7 +3590,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
},
.memory,
.embedded_in_code,
- .linker_sym_index,
+ .got_load,
+ .direct_load,
=> {
if (ty.abiSize(self.target.*) <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
@@ -3982,14 +3997,21 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = undefined,
});
},
- .linker_sym_index => |sym_index| {
+ .got_load,
+ .direct_load,
+ => |sym_index| {
+ const flags: u2 = switch (mcv) {
+ .got_load => 0b00,
+ .direct_load => 0b01,
+ else => unreachable,
+ };
_ = try self.addInst(.{
- .tag = .lea,
+ .tag = .lea_pie,
.ops = (Mir.Ops{
.reg1 = reg,
- .flags = 0b10,
+ .flags = flags,
}).encode(),
- .data = .{ .got_entry = sym_index },
+ .data = .{ .linker_sym_index = sym_index },
});
// MOV reg, [reg]
_ = try self.addInst(.{
@@ -4316,7 +4338,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// Because MachO is PIE-always-on, we defer memory address resolution until
// the linker has enough info to perform relocations.
- return MCValue{ .linker_sym_index = decl.link.macho.local_sym_index };
+ return MCValue{ .got_load = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
@@ -4331,6 +4353,24 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
_ = tv;
}
+fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
+ const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
+ return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
+ };
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
+ return MCValue{ .memory = vaddr };
+ } else if (self.bin_file.cast(link.File.MachO)) |_| {
+ return MCValue{ .direct_load = local_sym_index };
+ } else if (self.bin_file.cast(link.File.Coff)) |_| {
+ return self.fail("TODO lower unnamed const in COFF", .{});
+ } else if (self.bin_file.cast(link.File.Plan9)) |_| {
+ return self.fail("TODO lower unnamed const in Plan9", .{});
+ } else {
+ return self.fail("TODO lower unnamed const", .{});
+ }
+}
+
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
@@ -4446,6 +4486,9 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty});
},
+ .Struct => {
+ return self.lowerUnnamedConst(typed_value);
+ },
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
}
}
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 274d1e86b5..be26354402 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -131,6 +131,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.movabs => try emit.mirMovabs(inst),
.lea => try emit.mirLea(inst),
+ .lea_pie => try emit.mirLeaPie(inst),
.imul_complex => try emit.mirIMulComplex(inst),
@@ -706,36 +707,6 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
mem.writeIntLittle(i32, emit.code.items[end_offset - 4 ..][0..4], disp);
},
0b10 => {
- // lea reg1, [rip + reloc]
- // RM
- try lowerToRmEnc(
- .lea,
- ops.reg1,
- RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
- emit.code,
- );
- const end_offset = emit.code.items.len;
- const got_entry = emit.mir.instructions.items(.data)[inst].got_entry;
- if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- // TODO I think the reloc might be in the wrong place.
- const decl = macho_file.active_decl.?;
- try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
- .offset = @intCast(u32, end_offset - 4),
- .target = .{ .local = got_entry },
- .addend = 0,
- .subtractor = null,
- .pcrel = true,
- .length = 2,
- .@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
- });
- } else {
- return emit.fail(
- "TODO implement lea reg, [rip + reloc] for linking backends different than MachO",
- .{},
- );
- }
- },
- 0b11 => {
// lea reg, [rbp + rcx + imm32]
const imm = emit.mir.instructions.items(.data)[inst].imm;
const src_reg: ?Register = if (ops.reg2 == .none) null else ops.reg2;
@@ -754,6 +725,46 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
emit.code,
);
},
+ 0b11 => return emit.fail("TODO unused LEA variant 0b11", .{}),
+ }
+}
+
+fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ assert(tag == .lea_pie);
+ const ops = Mir.Ops.decode(emit.mir.instructions.items(.ops)[inst]);
+
+ // lea reg1, [rip + reloc]
+ // RM
+ try lowerToRmEnc(
+ .lea,
+ ops.reg1,
+ RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
+ emit.code,
+ );
+ const end_offset = emit.code.items.len;
+ const reloc_type = switch (ops.flags) {
+ 0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
+ 0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
+ else => return emit.fail("TODO unused LEA PIE variants 0b10 and 0b11", .{}),
+ };
+ const sym_index = emit.mir.instructions.items(.data)[inst].linker_sym_index;
+ if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
+ const decl = macho_file.active_decl.?;
+ try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
+ .offset = @intCast(u32, end_offset - 4),
+ .target = .{ .local = sym_index },
+ .addend = 0,
+ .subtractor = null,
+ .pcrel = true,
+ .length = 2,
+ .@"type" = reloc_type,
+ });
+ } else {
+ return emit.fail(
+ "TODO implement lea reg, [rip + reloc] for linking backends different than MachO",
+ .{},
+ );
}
}
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 181d4c92e0..2e8a9cf332 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -202,13 +202,16 @@ pub const Inst = struct {
/// 0b00 reg1, [reg2 + imm32]
/// 0b00 reg1, [ds:imm32]
/// 0b01 reg1, [rip + imm32]
- /// 0b10 reg1, [rip + reloc]
- /// 0b11 reg1, [reg2 + rcx + imm32]
- /// Notes:
- /// * if flags are 0b10, `Data` contains `got_entry` for the linker to generate
- /// a valid relocation for.
+ /// 0b10 reg1, [reg2 + rcx + imm32]
lea,
+ /// ops flags: form:
+ /// 0b00 reg1, [rip + reloc] // via GOT emits X86_64_RELOC_GOT relocation
+ /// 0b01 reg1, [rip + reloc] // direct load emits X86_64_RELOC_SIGNED relocation
+ /// Notes:
+ /// * `Data` contains `linker_sym_index`
+ lea_pie,
+
/// ops flags: form:
/// 0bX0 reg1
/// 0bX1 [reg1 + imm32]
@@ -342,8 +345,8 @@ pub const Inst = struct {
/// An extern function.
/// Index into the linker's string table.
extern_fn: u32,
- /// Entry in the GOT table by index.
- got_entry: u32,
+ /// Entry in the linker's symbol table.
+ linker_sym_index: u32,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
payload: u32,
};
diff --git a/src/arch/x86_64/PrintMir.zig b/src/arch/x86_64/PrintMir.zig
index 8f30802912..7c96b15210 100644
--- a/src/arch/x86_64/PrintMir.zig
+++ b/src/arch/x86_64/PrintMir.zig
@@ -119,6 +119,7 @@ pub fn printMir(print: *const Print, w: anytype, mir_to_air_map: std.AutoHashMap
.movabs => try print.mirMovabs(inst, w),
.lea => try print.mirLea(inst, w),
+ .lea_pie => try print.mirLeaPie(inst, w),
.imul_complex => try print.mirIMulComplex(inst, w),
@@ -412,7 +413,7 @@ fn mirLea(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
} else {
try w.print("ds:", .{});
}
- try w.print("{d}]\n", .{imm});
+ try w.print("{d}]", .{imm});
},
0b01 => {
try w.print("{s}, ", .{@tagName(ops.reg1)});
@@ -429,6 +430,7 @@ fn mirLea(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
try w.print("target@{x}", .{imm});
},
0b10 => {
+ const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("{s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
@@ -437,23 +439,37 @@ fn mirLea(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
64 => try w.print("qword ptr ", .{}),
else => unreachable,
}
- try w.print("[rip + 0x0] ", .{});
- const got_entry = print.mir.instructions.items(.data)[inst].got_entry;
- if (print.bin_file.cast(link.File.MachO)) |macho_file| {
- const target = macho_file.locals.items[got_entry];
- const target_name = macho_file.getString(target.n_strx);
- try w.print("target@{s}", .{target_name});
- } else {
- try w.writeAll("TODO lea reg, [rip + reloc] for linking backends different than MachO");
- }
+ try w.print("[rbp + rcx + {d}]", .{imm});
},
0b11 => {
- try w.writeAll("unused variant\n");
+ try w.writeAll("unused variant");
},
}
try w.writeAll("\n");
}
+fn mirLeaPie(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
+ const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
+ try w.print("lea {s}, ", .{@tagName(ops.reg1)});
+ switch (ops.reg1.size()) {
+ 8 => try w.print("byte ptr ", .{}),
+ 16 => try w.print("word ptr ", .{}),
+ 32 => try w.print("dword ptr ", .{}),
+ 64 => try w.print("qword ptr ", .{}),
+ else => unreachable,
+ }
+ try w.print("[rip + 0x0] ", .{});
+ const sym_index = print.mir.instructions.items(.data)[inst].linker_sym_index;
+ if (print.bin_file.cast(link.File.MachO)) |macho_file| {
+ const target = macho_file.locals.items[sym_index];
+ const target_name = macho_file.getString(target.n_strx);
+ try w.print("target@{s}", .{target_name});
+ } else {
+ try w.print("TODO lea PIE for other backends", .{});
+ }
+ return w.writeByte('\n');
+}
+
fn mirCallExtern(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
_ = print;
_ = inst;
diff --git a/src/link.zig b/src/link.zig
index 51e7082aa7..56b88bffef 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -17,6 +17,7 @@ const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const wasi_libc = @import("wasi_libc.zig");
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
+const TypedValue = @import("TypedValue.zig");
pub const SystemLib = struct {
needed: bool = false,
@@ -429,6 +430,25 @@ pub const File = struct {
CurrentWorkingDirectoryUnlinked,
};
+ /// Called from within the CodeGen to lower a local variable instantiation as an unnamed
+ /// constant. Returns the symbol index of the lowered constant in the read-only section
+ /// of the final binary.
+ pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl: *Module.Decl) UpdateDeclError!u32 {
+ log.debug("lowerUnnamedConst {*} ({s})", .{ decl, decl.name });
+ switch (base.tag) {
+ // zig fmt: off
+ .coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl),
+ .elf => return @fieldParentPtr(Elf, "base", base).lowerUnnamedConst(tv, decl),
+ .macho => return @fieldParentPtr(MachO, "base", base).lowerUnnamedConst(tv, decl),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).lowerUnnamedConst(tv, decl),
+ .spirv => unreachable,
+ .c => unreachable,
+ .wasm => unreachable,
+ .nvptx => unreachable,
+ // zig fmt: on
+ }
+ }
+
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index bc5837fe47..2f500e6b91 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -21,6 +21,7 @@ const mingw = @import("../mingw.zig");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
+const TypedValue = @import("../TypedValue.zig");
const allocation_padding = 4 / 3;
const minimum_text_block_size = 64 * allocation_padding;
@@ -697,6 +698,14 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
return self.finishUpdateDecl(module, func.owner_decl, code);
}
+pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl: *Module.Decl) !u32 {
+ _ = self;
+ _ = tv;
+ _ = decl;
+ log.debug("TODO lowerUnnamedConst for Coff", .{});
+ return error.AnalysisFail;
+}
+
pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index afaf41a2f9..ea9556a952 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -19,6 +19,7 @@ const trace = @import("../tracy.zig").trace;
const Package = @import("../Package.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
+const TypedValue = @import("../TypedValue.zig");
const link = @import("../link.zig");
const File = link.File;
const build_options = @import("build_options");
@@ -110,6 +111,9 @@ debug_line_header_dirty: bool = false,
error_flags: File.ErrorFlags = File.ErrorFlags{},
+/// Table of the last allocated atoms, keyed by program header index.
+atoms: std.AutoHashMapUnmanaged(u16, *TextBlock) = .{},
+
/// A list of text blocks that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
/// or removed from the freelist.
@@ -125,10 +129,42 @@ error_flags: File.ErrorFlags = File.ErrorFlags{},
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh text block, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
-atoms: std.AutoHashMapUnmanaged(u16, *TextBlock) = .{},
atom_free_lists: std.AutoHashMapUnmanaged(u16, std.ArrayListUnmanaged(*TextBlock)) = .{},
+
+/// Table of Decls that are currently alive.
+/// We store them here so that we can properly dispose of any allocated
+/// memory within the atom in the incremental linker.
+/// TODO consolidate this.
decls: std.AutoHashMapUnmanaged(*Module.Decl, ?u16) = .{},
+/// List of atoms that are owned directly by the linker.
+/// Currently these are only atoms that are the result of linking
+/// object files. Atoms which take part in incremental linking are
+/// at present owned by Module.Decl.
+/// TODO consolidate this.
+managed_atoms: std.ArrayListUnmanaged(*TextBlock) = .{},
+
+/// Table of unnamed constants associated with a parent `Decl`.
+/// We store them here so that we can free the constants whenever the `Decl`
+/// needs updating or is freed.
+///
+/// For example,
+///
+/// ```zig
+/// const Foo = struct{
+/// a: u8,
+/// };
+///
+/// pub fn main() void {
+/// var foo = Foo{ .a = 1 };
+/// _ = foo;
+/// }
+/// ```
+///
+/// The value assigned to `foo` is an unnamed constant associated
+/// with `Decl` `main`, and lives as long as that `Decl`.
+unnamed_const_atoms: UnnamedConstTable = .{},
+
/// A list of `SrcFn` whose Line Number Programs have surplus capacity.
/// This is the same concept as `text_block_free_list`; see those doc comments.
dbg_line_fn_free_list: std.AutoHashMapUnmanaged(*SrcFn, void) = .{},
@@ -141,6 +177,8 @@ dbg_info_decl_free_list: std.AutoHashMapUnmanaged(*TextBlock, void) = .{},
dbg_info_decl_first: ?*TextBlock = null,
dbg_info_decl_last: ?*TextBlock = null,
+const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*TextBlock));
+
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
const ideal_factor = 3;
@@ -342,6 +380,19 @@ pub fn deinit(self: *Elf) void {
}
self.atom_free_lists.deinit(self.base.allocator);
}
+
+ for (self.managed_atoms.items) |atom| {
+ self.base.allocator.destroy(atom);
+ }
+ self.managed_atoms.deinit(self.base.allocator);
+
+ {
+ var it = self.unnamed_const_atoms.valueIterator();
+ while (it.next()) |atoms| {
+ atoms.deinit(self.base.allocator);
+ }
+ self.unnamed_const_atoms.deinit(self.base.allocator);
+ }
}
pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl) u64 {
@@ -2166,6 +2217,11 @@ fn writeElfHeader(self: *Elf) !void {
}
fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
+ const local_sym = self.local_symbols.items[text_block.local_sym_index];
+ const name_str_index = local_sym.st_name;
+ const name = self.getString(name_str_index);
+ log.debug("freeTextBlock {*} ({s})", .{ text_block, name });
+
const free_list = self.atom_free_lists.getPtr(phdr_index).?;
var already_have_free_list_node = false;
{
@@ -2376,23 +2432,43 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
return vaddr;
}
+fn allocateLocalSymbol(self: *Elf) !u32 {
+ try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);
+
+ const index = blk: {
+ if (self.local_symbol_free_list.popOrNull()) |index| {
+ log.debug(" (reusing symbol index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating symbol index {d})", .{self.local_symbols.items.len});
+ const index = @intCast(u32, self.local_symbols.items.len);
+ _ = self.local_symbols.addOneAssumeCapacity();
+ break :blk index;
+ }
+ };
+
+ self.local_symbols.items[index] = .{
+ .st_name = 0,
+ .st_info = 0,
+ .st_other = 0,
+ .st_shndx = 0,
+ .st_value = 0,
+ .st_size = 0,
+ };
+
+ return index;
+}
+
pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
if (self.llvm_object) |_| return;
if (decl.link.elf.local_sym_index != 0) return;
- try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);
try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
try self.decls.putNoClobber(self.base.allocator, decl, null);
- if (self.local_symbol_free_list.popOrNull()) |i| {
- log.debug("reusing symbol index {d} for {s}", .{ i, decl.name });
- decl.link.elf.local_sym_index = i;
- } else {
- log.debug("allocating symbol index {d} for {s}", .{ self.local_symbols.items.len, decl.name });
- decl.link.elf.local_sym_index = @intCast(u32, self.local_symbols.items.len);
- _ = self.local_symbols.addOneAssumeCapacity();
- }
+ log.debug("allocating symbol indexes for {s}", .{decl.name});
+ decl.link.elf.local_sym_index = try self.allocateLocalSymbol();
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.elf.offset_table_index = i;
@@ -2401,18 +2477,19 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
_ = self.offset_table.addOneAssumeCapacity();
self.offset_table_count_dirty = true;
}
-
- self.local_symbols.items[decl.link.elf.local_sym_index] = .{
- .st_name = 0,
- .st_info = 0,
- .st_other = 0,
- .st_shndx = 0,
- .st_value = 0,
- .st_size = 0,
- };
self.offset_table.items[decl.link.elf.offset_table_index] = 0;
}
+fn freeUnnamedConsts(self: *Elf, decl: *Module.Decl) void {
+ const unnamed_consts = self.unnamed_const_atoms.getPtr(decl) orelse return;
+ for (unnamed_consts.items) |atom| {
+ self.freeTextBlock(atom, self.phdr_load_ro_index.?);
+ self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
+ self.local_symbols.items[atom.local_sym_index].st_info = 0;
+ }
+ unnamed_consts.clearAndFree(self.base.allocator);
+}
+
pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
@@ -2421,6 +2498,7 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
const kv = self.decls.fetchRemove(decl);
if (kv.?.value) |index| {
self.freeTextBlock(&decl.link.elf, index);
+ self.freeUnnamedConsts(decl);
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -2528,7 +2606,6 @@ fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8
const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
errdefer self.freeTextBlock(&decl.link.elf, phdr_index);
log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
- errdefer self.freeTextBlock(&decl.link.elf, phdr_index);
local_sym.* = .{
.st_name = name_str_index,
@@ -2632,6 +2709,8 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
const decl = func.owner_decl;
+ self.freeUnnamedConsts(decl);
+
log.debug("updateFunc {s}{*}", .{ decl.name, func.owner_decl });
log.debug(" (decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d})", .{
decl.src_line,
@@ -2859,6 +2938,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
}
}
+ assert(!self.unnamed_const_atoms.contains(decl));
+
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2897,6 +2978,74 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer);
}
+pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl) !u32 {
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ const module = self.base.options.module.?;
+ const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ const unnamed_consts = gop.value_ptr;
+
+ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
+ .none = .{},
+ });
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return error.AnalysisFail;
+ },
+ };
+
+ const atom = try self.base.allocator.create(TextBlock);
+ errdefer self.base.allocator.destroy(atom);
+ atom.* = TextBlock.empty;
+ try self.managed_atoms.append(self.base.allocator, atom);
+
+ const name_str_index = blk: {
+ const index = unnamed_consts.items.len;
+ const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl.name, index });
+ defer self.base.allocator.free(name);
+ break :blk try self.makeString(name);
+ };
+ const name = self.getString(name_str_index);
+
+ log.debug("allocating symbol indexes for {s}", .{name});
+ atom.local_sym_index = try self.allocateLocalSymbol();
+
+ const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const phdr_index = self.phdr_load_ro_index.?;
+ const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ const vaddr = try self.allocateTextBlock(atom, code.len, required_alignment, phdr_index);
+ errdefer self.freeTextBlock(atom, phdr_index);
+
+ log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
+
+ const local_sym = &self.local_symbols.items[atom.local_sym_index];
+ local_sym.* = .{
+ .st_name = name_str_index,
+ .st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT,
+ .st_other = 0,
+ .st_shndx = shdr_index,
+ .st_value = vaddr,
+ .st_size = code.len,
+ };
+
+ try self.writeSymbol(atom.local_sym_index);
+ try unnamed_consts.append(self.base.allocator, atom);
+
+ const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
+ const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ try self.base.file.?.pwriteAll(code, file_offset);
+
+ return atom.local_sym_index;
+}
+
/// Asserts the type has codegen bits.
fn addDbgInfoType(
self: *Elf,
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index be7a9d0f85..065145cdc8 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -39,6 +39,7 @@ const StringIndexAdapter = std.hash_map.StringIndexAdapter;
const StringIndexContext = std.hash_map.StringIndexContext;
const Trie = @import("MachO/Trie.zig");
const Type = @import("../type.zig").Type;
+const TypedValue = @import("../TypedValue.zig");
pub const TextBlock = Atom;
@@ -166,14 +167,17 @@ stub_helper_preamble_atom: ?*Atom = null,
strtab: std.ArrayListUnmanaged(u8) = .{},
strtab_dir: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
-tlv_ptr_entries_map: std.AutoArrayHashMapUnmanaged(Atom.Relocation.Target, *Atom) = .{},
-tlv_ptr_entries_map_free_list: std.ArrayListUnmanaged(u32) = .{},
+tlv_ptr_entries: std.ArrayListUnmanaged(Entry) = .{},
+tlv_ptr_entries_free_list: std.ArrayListUnmanaged(u32) = .{},
+tlv_ptr_entries_table: std.AutoArrayHashMapUnmanaged(Atom.Relocation.Target, u32) = .{},
-got_entries_map: std.AutoArrayHashMapUnmanaged(Atom.Relocation.Target, *Atom) = .{},
-got_entries_map_free_list: std.ArrayListUnmanaged(u32) = .{},
+got_entries: std.ArrayListUnmanaged(Entry) = .{},
+got_entries_free_list: std.ArrayListUnmanaged(u32) = .{},
+got_entries_table: std.AutoArrayHashMapUnmanaged(Atom.Relocation.Target, u32) = .{},
-stubs_map: std.AutoArrayHashMapUnmanaged(u32, *Atom) = .{},
-stubs_map_free_list: std.ArrayListUnmanaged(u32) = .{},
+stubs: std.ArrayListUnmanaged(*Atom) = .{},
+stubs_free_list: std.ArrayListUnmanaged(u32) = .{},
+stubs_table: std.AutoArrayHashMapUnmanaged(u32, u32) = .{},
error_flags: File.ErrorFlags = File.ErrorFlags{},
@@ -217,6 +221,27 @@ atoms: std.AutoHashMapUnmanaged(MatchingSection, *Atom) = .{},
/// TODO consolidate this.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+/// Table of unnamed constants associated with a parent `Decl`.
+/// We store them here so that we can free the constants whenever the `Decl`
+/// needs updating or is freed.
+///
+/// For example,
+///
+/// ```zig
+/// const Foo = struct{
+/// a: u8,
+/// };
+///
+/// pub fn main() void {
+/// var foo = Foo{ .a = 1 };
+/// _ = foo;
+/// }
+/// ```
+///
+/// The value assigned to `foo` is an unnamed constant associated
+/// with `Decl` `main`, and lives as long as that `Decl`.
+unnamed_const_atoms: UnnamedConstTable = .{},
+
/// Table of Decls that are currently alive.
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
@@ -229,6 +254,13 @@ decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, ?MatchingSection) = .{},
/// somewhere else in the codegen.
active_decl: ?*Module.Decl = null,
+const Entry = struct {
+ target: Atom.Relocation.Target,
+ atom: *Atom,
+};
+
+const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*Atom));
+
const PendingUpdate = union(enum) {
resolve_undef: u32,
add_stub_entry: u32,
@@ -661,16 +693,15 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
sym.n_desc = 0;
},
}
- if (self.got_entries_map.getIndex(.{ .global = entry.key })) |i| {
- self.got_entries_map_free_list.append(
- self.base.allocator,
- @intCast(u32, i),
- ) catch {};
- self.got_entries_map.keys()[i] = .{ .local = 0 };
+ if (self.got_entries_table.get(.{ .global = entry.key })) |i| {
+ self.got_entries_free_list.append(self.base.allocator, @intCast(u32, i)) catch {};
+ self.got_entries.items[i] = .{ .target = .{ .local = 0 }, .atom = undefined };
+ _ = self.got_entries_table.swapRemove(.{ .global = entry.key });
}
- if (self.stubs_map.getIndex(entry.key)) |i| {
- self.stubs_map_free_list.append(self.base.allocator, @intCast(u32, i)) catch {};
- self.stubs_map.keys()[i] = 0;
+ if (self.stubs_table.get(entry.key)) |i| {
+ self.stubs_free_list.append(self.base.allocator, @intCast(u32, i)) catch {};
+ self.stubs.items[i] = undefined;
+ _ = self.stubs_table.swapRemove(entry.key);
}
}
}
@@ -2948,7 +2979,7 @@ fn resolveSymbolsInDylibs(self: *MachO) !void {
.none => {},
.got => return error.TODOGotHint,
.stub => {
- if (self.stubs_map.contains(sym.n_strx)) break :outer_blk;
+ if (self.stubs_table.contains(sym.n_strx)) break :outer_blk;
const stub_helper_atom = blk: {
const match = MatchingSection{
.seg = self.text_segment_cmd_index.?,
@@ -2991,7 +3022,9 @@ fn resolveSymbolsInDylibs(self: *MachO) !void {
atom_sym.n_sect = @intCast(u8, self.section_ordinals.getIndex(match).? + 1);
break :blk atom;
};
- try self.stubs_map.putNoClobber(self.base.allocator, sym.n_strx, stub_atom);
+ const stub_index = @intCast(u32, self.stubs.items.len);
+ try self.stubs.append(self.base.allocator, stub_atom);
+ try self.stubs_table.putNoClobber(self.base.allocator, sym.n_strx, stub_index);
},
}
}
@@ -3086,7 +3119,9 @@ fn resolveDyldStubBinder(self: *MachO) !void {
// Add dyld_stub_binder as the final GOT entry.
const target = Atom.Relocation.Target{ .global = n_strx };
const atom = try self.createGotAtom(target);
- try self.got_entries_map.putNoClobber(self.base.allocator, target, atom);
+ const got_index = @intCast(u32, self.got_entries.items.len);
+ try self.got_entries.append(self.base.allocator, .{ .target = target, .atom = atom });
+ try self.got_entries_table.putNoClobber(self.base.allocator, target, got_index);
const match = MatchingSection{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.got_section_index.?,
@@ -3339,12 +3374,15 @@ pub fn deinit(self: *MachO) void {
}
self.section_ordinals.deinit(self.base.allocator);
- self.tlv_ptr_entries_map.deinit(self.base.allocator);
- self.tlv_ptr_entries_map_free_list.deinit(self.base.allocator);
- self.got_entries_map.deinit(self.base.allocator);
- self.got_entries_map_free_list.deinit(self.base.allocator);
- self.stubs_map.deinit(self.base.allocator);
- self.stubs_map_free_list.deinit(self.base.allocator);
+ self.tlv_ptr_entries.deinit(self.base.allocator);
+ self.tlv_ptr_entries_free_list.deinit(self.base.allocator);
+ self.tlv_ptr_entries_table.deinit(self.base.allocator);
+ self.got_entries.deinit(self.base.allocator);
+ self.got_entries_free_list.deinit(self.base.allocator);
+ self.got_entries_table.deinit(self.base.allocator);
+ self.stubs.deinit(self.base.allocator);
+ self.stubs_free_list.deinit(self.base.allocator);
+ self.stubs_table.deinit(self.base.allocator);
self.strtab_dir.deinit(self.base.allocator);
self.strtab.deinit(self.base.allocator);
self.undefs.deinit(self.base.allocator);
@@ -3395,6 +3433,14 @@ pub fn deinit(self: *MachO) void {
decl.link.macho.deinit(self.base.allocator);
}
self.decls.deinit(self.base.allocator);
+
+ {
+ var it = self.unnamed_const_atoms.valueIterator();
+ while (it.next()) |atoms| {
+ atoms.deinit(self.base.allocator);
+ }
+ self.unnamed_const_atoms.deinit(self.base.allocator);
+ }
}
pub fn closeFiles(self: MachO) void {
@@ -3409,9 +3455,11 @@ pub fn closeFiles(self: MachO) void {
}
}
-fn freeAtom(self: *MachO, atom: *Atom, match: MatchingSection) void {
+fn freeAtom(self: *MachO, atom: *Atom, match: MatchingSection, owns_atom: bool) void {
log.debug("freeAtom {*}", .{atom});
- atom.deinit(self.base.allocator);
+ if (!owns_atom) {
+ atom.deinit(self.base.allocator);
+ }
const free_list = self.atom_free_lists.getPtr(match).?;
var already_have_free_list_node = false;
@@ -3502,23 +3550,22 @@ fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64, match
return self.allocateAtom(atom, new_atom_size, alignment, match);
}
-pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
- if (self.llvm_object) |_| return;
- if (decl.link.macho.local_sym_index != 0) return;
-
+fn allocateLocalSymbol(self: *MachO) !u32 {
try self.locals.ensureUnusedCapacity(self.base.allocator, 1);
- try self.decls.putNoClobber(self.base.allocator, decl, null);
- if (self.locals_free_list.popOrNull()) |i| {
- log.debug("reusing symbol index {d} for {s}", .{ i, decl.name });
- decl.link.macho.local_sym_index = i;
- } else {
- log.debug("allocating symbol index {d} for {s}", .{ self.locals.items.len, decl.name });
- decl.link.macho.local_sym_index = @intCast(u32, self.locals.items.len);
- _ = self.locals.addOneAssumeCapacity();
- }
+ const index = blk: {
+ if (self.locals_free_list.popOrNull()) |index| {
+ log.debug(" (reusing symbol index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
+ const index = @intCast(u32, self.locals.items.len);
+ _ = self.locals.addOneAssumeCapacity();
+ break :blk index;
+ }
+ };
- self.locals.items[decl.link.macho.local_sym_index] = .{
+ self.locals.items[index] = .{
.n_strx = 0,
.n_type = 0,
.n_sect = 0,
@@ -3526,24 +3573,86 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
.n_value = 0,
};
- // TODO try popping from free list first before allocating a new GOT atom.
- const target = Atom.Relocation.Target{ .local = decl.link.macho.local_sym_index };
- const value_ptr = blk: {
- if (self.got_entries_map_free_list.popOrNull()) |i| {
- log.debug("reusing GOT entry index {d} for {s}", .{ i, decl.name });
- self.got_entries_map.keys()[i] = target;
- const value_ptr = self.got_entries_map.getPtr(target).?;
- break :blk value_ptr;
+ return index;
+}
+
+pub fn allocateGotEntry(self: *MachO, target: Atom.Relocation.Target) !u32 {
+ try self.got_entries.ensureUnusedCapacity(self.base.allocator, 1);
+
+ const index = blk: {
+ if (self.got_entries_free_list.popOrNull()) |index| {
+ log.debug(" (reusing GOT entry index {d})", .{index});
+ break :blk index;
} else {
- const res = try self.got_entries_map.getOrPut(self.base.allocator, target);
- log.debug("creating new GOT entry at index {d} for {s}", .{
- self.got_entries_map.getIndex(target).?,
- decl.name,
- });
- break :blk res.value_ptr;
+ log.debug(" (allocating GOT entry at index {d})", .{self.got_entries.items.len});
+ const index = @intCast(u32, self.got_entries.items.len);
+ _ = self.got_entries.addOneAssumeCapacity();
+ break :blk index;
}
};
- value_ptr.* = try self.createGotAtom(target);
+
+ self.got_entries.items[index] = .{
+ .target = target,
+ .atom = undefined,
+ };
+ try self.got_entries_table.putNoClobber(self.base.allocator, target, index);
+
+ return index;
+}
+
+pub fn allocateStubEntry(self: *MachO, n_strx: u32) !u32 {
+ try self.stubs.ensureUnusedCapacity(self.base.allocator, 1);
+
+ const index = blk: {
+ if (self.stubs_free_list.popOrNull()) |index| {
+ log.debug(" (reusing stub entry index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating stub entry at index {d})", .{self.stubs.items.len});
+ const index = @intCast(u32, self.stubs.items.len);
+ _ = self.stubs.addOneAssumeCapacity();
+ break :blk index;
+ }
+ };
+
+ self.stubs.items[index] = undefined;
+ try self.stubs_table.putNoClobber(self.base.allocator, n_strx, index);
+
+ return index;
+}
+
+pub fn allocateTlvPtrEntry(self: *MachO, target: Atom.Relocation.Target) !u32 {
+ try self.tlv_ptr_entries.ensureUnusedCapacity(self.base.allocator, 1);
+
+ const index = blk: {
+ if (self.tlv_ptr_entries_free_list.popOrNull()) |index| {
+ log.debug(" (reusing TLV ptr entry index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating TLV ptr entry at index {d})", .{self.tlv_ptr_entries.items.len});
+ const index = @intCast(u32, self.tlv_ptr_entries.items.len);
+ _ = self.tlv_ptr_entries.addOneAssumeCapacity();
+ break :blk index;
+ }
+ };
+
+ self.tlv_ptr_entries.items[index] = .{ .target = target, .atom = undefined };
+ try self.tlv_ptr_entries_table.putNoClobber(self.base.allocator, target, index);
+
+ return index;
+}
+
+pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
+ if (self.llvm_object) |_| return;
+ if (decl.link.macho.local_sym_index != 0) return;
+
+ decl.link.macho.local_sym_index = try self.allocateLocalSymbol();
+ try self.decls.putNoClobber(self.base.allocator, decl, null);
+
+ const got_target = .{ .local = decl.link.macho.local_sym_index };
+ const got_index = try self.allocateGotEntry(got_target);
+ const got_atom = try self.createGotAtom(got_target);
+ self.got_entries.items[got_index].atom = got_atom;
}
pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@@ -3557,6 +3666,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
defer tracy.end();
const decl = func.owner_decl;
+ self.freeUnnamedConsts(decl);
// TODO clearing the code and relocs buffer should probably be orchestrated
// in a different, smarter, more automatic way somewhere else, in a more centralised
// way than this.
@@ -3624,6 +3734,70 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
try self.updateDeclExports(module, decl, decl_exports);
}
+pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.Decl) !u32 {
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ const module = self.base.options.module.?;
+ const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ const unnamed_consts = gop.value_ptr;
+
+ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
+ .none = .{},
+ });
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return error.AnalysisFail;
+ },
+ };
+
+ const name_str_index = blk: {
+ const index = unnamed_consts.items.len;
+ const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl.name, index });
+ defer self.base.allocator.free(name);
+ break :blk try self.makeString(name);
+ };
+ const name = self.getString(name_str_index);
+
+ log.debug("allocating symbol indexes for {s}", .{name});
+
+ const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const match = (try self.getMatchingSection(.{
+ .segname = makeStaticString("__TEXT"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = math.log2(required_alignment),
+ })).?;
+ const local_sym_index = try self.allocateLocalSymbol();
+ const atom = try self.createEmptyAtom(local_sym_index, code.len, math.log2(required_alignment));
+ mem.copy(u8, atom.code.items, code);
+ const addr = try self.allocateAtom(atom, code.len, required_alignment, match);
+
+ log.debug("allocated atom for {s} at 0x{x}", .{ name, addr });
+
+ errdefer self.freeAtom(atom, match, true);
+
+ const symbol = &self.locals.items[atom.local_sym_index];
+ symbol.* = .{
+ .n_strx = name_str_index,
+ .n_type = macho.N_SECT,
+ .n_sect = @intCast(u8, self.section_ordinals.getIndex(match).?) + 1,
+ .n_desc = 0,
+ .n_value = addr,
+ };
+
+ try unnamed_consts.append(self.base.allocator, atom);
+
+ return atom.local_sym_index;
+}
+
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@@ -3879,7 +4053,8 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
if (vaddr != symbol.n_value) {
log.debug(" (writing new GOT entry)", .{});
- const got_atom = self.got_entries_map.get(.{ .local = decl.link.macho.local_sym_index }).?;
+ const got_index = self.got_entries_table.get(.{ .local = decl.link.macho.local_sym_index }).?;
+ const got_atom = self.got_entries.items[got_index].atom;
const got_sym = &self.locals.items[got_atom.local_sym_index];
const got_vaddr = try self.allocateAtom(got_atom, @sizeOf(u64), 8, .{
.seg = self.data_const_segment_cmd_index.?,
@@ -3920,7 +4095,7 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, addr });
- errdefer self.freeAtom(&decl.link.macho, match);
+ errdefer self.freeAtom(&decl.link.macho, match, false);
symbol.* = .{
.n_strx = name_str_index,
@@ -3929,7 +4104,8 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
.n_desc = 0,
.n_value = addr,
};
- const got_atom = self.got_entries_map.get(.{ .local = decl.link.macho.local_sym_index }).?;
+ const got_index = self.got_entries_table.get(.{ .local = decl.link.macho.local_sym_index }).?;
+ const got_atom = self.got_entries.items[got_index].atom;
const got_sym = &self.locals.items[got_atom.local_sym_index];
const vaddr = try self.allocateAtom(got_atom, @sizeOf(u64), 8, .{
.seg = self.data_const_segment_cmd_index.?,
@@ -4103,6 +4279,19 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
global.n_value = 0;
}
+fn freeUnnamedConsts(self: *MachO, decl: *Module.Decl) void {
+ const unnamed_consts = self.unnamed_const_atoms.getPtr(decl) orelse return;
+ for (unnamed_consts.items) |atom| {
+ self.freeAtom(atom, .{
+ .seg = self.text_segment_cmd_index.?,
+ .sect = self.text_const_section_index.?,
+ }, true);
+ self.locals_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
+ self.locals.items[atom.local_sym_index].n_type = 0;
+ }
+ unnamed_consts.clearAndFree(self.base.allocator);
+}
+
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
@@ -4110,15 +4299,19 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
log.debug("freeDecl {*}", .{decl});
const kv = self.decls.fetchSwapRemove(decl);
if (kv.?.value) |match| {
- self.freeAtom(&decl.link.macho, match);
+ self.freeAtom(&decl.link.macho, match, false);
+ self.freeUnnamedConsts(decl);
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
if (decl.link.macho.local_sym_index != 0) {
self.locals_free_list.append(self.base.allocator, decl.link.macho.local_sym_index) catch {};
- // Try freeing GOT atom
- const got_index = self.got_entries_map.getIndex(.{ .local = decl.link.macho.local_sym_index }).?;
- self.got_entries_map_free_list.append(self.base.allocator, @intCast(u32, got_index)) catch {};
+ // Try freeing GOT atom if this decl had one
+ if (self.got_entries_table.get(.{ .local = decl.link.macho.local_sym_index })) |got_index| {
+ self.got_entries_free_list.append(self.base.allocator, @intCast(u32, got_index)) catch {};
+ self.got_entries.items[got_index] = .{ .target = .{ .local = 0 }, .atom = undefined };
+ _ = self.got_entries_table.swapRemove(.{ .local = decl.link.macho.local_sym_index });
+ }
self.locals.items[decl.link.macho.local_sym_index].n_type = 0;
decl.link.macho.local_sym_index = 0;
@@ -5932,8 +6125,8 @@ fn writeSymbolTable(self: *MachO) !void {
const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].segment;
const la_symbol_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?];
- const nstubs = @intCast(u32, self.stubs_map.keys().len);
- const ngot_entries = @intCast(u32, self.got_entries_map.keys().len);
+ const nstubs = @intCast(u32, self.stubs_table.keys().len);
+ const ngot_entries = @intCast(u32, self.got_entries_table.keys().len);
dysymtab.indirectsymoff = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
dysymtab.nindirectsyms = nstubs * 2 + ngot_entries;
@@ -5953,7 +6146,7 @@ fn writeSymbolTable(self: *MachO) !void {
var writer = stream.writer();
stubs.reserved1 = 0;
- for (self.stubs_map.keys()) |key| {
+ for (self.stubs_table.keys()) |key| {
const resolv = self.symbol_resolver.get(key).?;
switch (resolv.where) {
.global => try writer.writeIntLittle(u32, macho.INDIRECT_SYMBOL_LOCAL),
@@ -5962,7 +6155,7 @@ fn writeSymbolTable(self: *MachO) !void {
}
got.reserved1 = nstubs;
- for (self.got_entries_map.keys()) |key| {
+ for (self.got_entries_table.keys()) |key| {
switch (key) {
.local => try writer.writeIntLittle(u32, macho.INDIRECT_SYMBOL_LOCAL),
.global => |n_strx| {
@@ -5976,7 +6169,7 @@ fn writeSymbolTable(self: *MachO) !void {
}
la_symbol_ptr.reserved1 = got.reserved1 + ngot_entries;
- for (self.stubs_map.keys()) |key| {
+ for (self.stubs_table.keys()) |key| {
const resolv = self.symbol_resolver.get(key).?;
switch (resolv.where) {
.global => try writer.writeIntLittle(u32, macho.INDIRECT_SYMBOL_LOCAL),
@@ -6348,7 +6541,7 @@ fn snapshotState(self: *MachO) !void {
};
if (is_via_got) {
- const got_atom = self.got_entries_map.get(rel.target) orelse break :blk 0;
+ const got_atom = self.got_entries_table.get(rel.target) orelse break :blk 0;
break :blk self.locals.items[got_atom.local_sym_index].n_value;
}
@@ -6380,10 +6573,11 @@ fn snapshotState(self: *MachO) !void {
switch (resolv.where) {
.global => break :blk self.globals.items[resolv.where_index].n_value,
.undef => {
- break :blk if (self.stubs_map.get(n_strx)) |stub_atom|
- self.locals.items[stub_atom.local_sym_index].n_value
- else
- 0;
+ if (self.stubs_table.get(n_strx)) |stub_index| {
+ const stub_atom = self.stubs.items[stub_index];
+ break :blk self.locals.items[stub_atom.local_sym_index].n_value;
+ }
+ break :blk 0;
},
}
},
@@ -6508,15 +6702,20 @@ fn logSymtab(self: MachO) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries_map.keys()) |key| {
+ for (self.got_entries_table.values()) |value| {
+ const key = self.got_entries.items[value].target;
+ const atom = self.got_entries.items[value].atom;
switch (key) {
- .local => |sym_index| log.debug(" {} => {d}", .{ key, sym_index }),
+ .local => {
+ const sym = self.locals.items[atom.local_sym_index];
+ log.debug(" {} => {s}", .{ key, self.getString(sym.n_strx) });
+ },
.global => |n_strx| log.debug(" {} => {s}", .{ key, self.getString(n_strx) }),
}
}
log.debug("__thread_ptrs entries:", .{});
- for (self.tlv_ptr_entries_map.keys()) |key| {
+ for (self.tlv_ptr_entries_table.keys()) |key| {
switch (key) {
.local => unreachable,
.global => |n_strx| log.debug(" {} => {s}", .{ key, self.getString(n_strx) }),
@@ -6524,7 +6723,7 @@ fn logSymtab(self: MachO) void {
}
log.debug("stubs:", .{});
- for (self.stubs_map.keys()) |key| {
+ for (self.stubs_table.keys()) |key| {
log.debug(" {} => {s}", .{ key, self.getString(key) });
}
}
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 2b16bc8cb0..fae1ff4eba 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -545,28 +545,11 @@ fn addPtrBindingOrRebase(
}
fn addTlvPtrEntry(target: Relocation.Target, context: RelocContext) !void {
- if (context.macho_file.tlv_ptr_entries_map.contains(target)) return;
+ if (context.macho_file.tlv_ptr_entries_table.contains(target)) return;
- const value_ptr = blk: {
- if (context.macho_file.tlv_ptr_entries_map_free_list.popOrNull()) |i| {
- log.debug("reusing __thread_ptrs entry index {d} for {}", .{ i, target });
- context.macho_file.tlv_ptr_entries_map.keys()[i] = target;
- const value_ptr = context.macho_file.tlv_ptr_entries_map.getPtr(target).?;
- break :blk value_ptr;
- } else {
- const res = try context.macho_file.tlv_ptr_entries_map.getOrPut(
- context.macho_file.base.allocator,
- target,
- );
- log.debug("creating new __thread_ptrs entry at index {d} for {}", .{
- context.macho_file.tlv_ptr_entries_map.getIndex(target).?,
- target,
- });
- break :blk res.value_ptr;
- }
- };
+ const index = try context.macho_file.allocateTlvPtrEntry(target);
const atom = try context.macho_file.createTlvPtrAtom(target);
- value_ptr.* = atom;
+ context.macho_file.tlv_ptr_entries.items[index].atom = atom;
const match = (try context.macho_file.getMatchingSection(.{
.segname = MachO.makeStaticString("__DATA"),
@@ -586,28 +569,11 @@ fn addTlvPtrEntry(target: Relocation.Target, context: RelocContext) !void {
}
fn addGotEntry(target: Relocation.Target, context: RelocContext) !void {
- if (context.macho_file.got_entries_map.contains(target)) return;
+ if (context.macho_file.got_entries_table.contains(target)) return;
- const value_ptr = blk: {
- if (context.macho_file.got_entries_map_free_list.popOrNull()) |i| {
- log.debug("reusing GOT entry index {d} for {}", .{ i, target });
- context.macho_file.got_entries_map.keys()[i] = target;
- const value_ptr = context.macho_file.got_entries_map.getPtr(target).?;
- break :blk value_ptr;
- } else {
- const res = try context.macho_file.got_entries_map.getOrPut(
- context.macho_file.base.allocator,
- target,
- );
- log.debug("creating new GOT entry at index {d} for {}", .{
- context.macho_file.got_entries_map.getIndex(target).?,
- target,
- });
- break :blk res.value_ptr;
- }
- };
+ const index = try context.macho_file.allocateGotEntry(target);
const atom = try context.macho_file.createGotAtom(target);
- value_ptr.* = atom;
+ context.macho_file.got_entries.items[index].atom = atom;
const match = MachO.MatchingSection{
.seg = context.macho_file.data_const_segment_cmd_index.?,
@@ -627,30 +593,13 @@ fn addGotEntry(target: Relocation.Target, context: RelocContext) !void {
fn addStub(target: Relocation.Target, context: RelocContext) !void {
if (target != .global) return;
- if (context.macho_file.stubs_map.contains(target.global)) return;
+ if (context.macho_file.stubs_table.contains(target.global)) return;
// If the symbol has been resolved as defined globally elsewhere (in a different translation unit),
// then skip creating stub entry.
// TODO Is this the correct for the incremental?
if (context.macho_file.symbol_resolver.get(target.global).?.where == .global) return;
- const value_ptr = blk: {
- if (context.macho_file.stubs_map_free_list.popOrNull()) |i| {
- log.debug("reusing stubs entry index {d} for {}", .{ i, target });
- context.macho_file.stubs_map.keys()[i] = target.global;
- const value_ptr = context.macho_file.stubs_map.getPtr(target.global).?;
- break :blk value_ptr;
- } else {
- const res = try context.macho_file.stubs_map.getOrPut(
- context.macho_file.base.allocator,
- target.global,
- );
- log.debug("creating new stubs entry at index {d} for {}", .{
- context.macho_file.stubs_map.getIndex(target.global).?,
- target,
- });
- break :blk res.value_ptr;
- }
- };
+ const stub_index = try context.macho_file.allocateStubEntry(target.global);
// TODO clean this up!
const stub_helper_atom = atom: {
@@ -707,7 +656,7 @@ fn addStub(target: Relocation.Target, context: RelocContext) !void {
} else {
try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
}
- value_ptr.* = atom;
+ context.macho_file.stubs.items[stub_index] = atom;
}
pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
@@ -741,7 +690,7 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
};
if (is_via_got) {
- const atom = macho_file.got_entries_map.get(rel.target) orelse {
+ const got_index = macho_file.got_entries_table.get(rel.target) orelse {
const n_strx = switch (rel.target) {
.local => |sym_index| macho_file.locals.items[sym_index].n_strx,
.global => |n_strx| n_strx,
@@ -750,6 +699,7 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
log.err(" this is an internal linker error", .{});
return error.FailedToResolveRelocationTarget;
};
+ const atom = macho_file.got_entries.items[got_index].atom;
break :blk macho_file.locals.items[atom.local_sym_index].n_value;
}
@@ -795,15 +745,17 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
switch (resolv.where) {
.global => break :blk macho_file.globals.items[resolv.where_index].n_value,
.undef => {
- break :blk if (macho_file.stubs_map.get(n_strx)) |atom|
- macho_file.locals.items[atom.local_sym_index].n_value
- else inner: {
- if (macho_file.tlv_ptr_entries_map.get(rel.target)) |atom| {
+ if (macho_file.stubs_table.get(n_strx)) |stub_index| {
+ const atom = macho_file.stubs.items[stub_index];
+ break :blk macho_file.locals.items[atom.local_sym_index].n_value;
+ } else {
+ if (macho_file.tlv_ptr_entries_table.get(rel.target)) |tlv_ptr_index| {
is_via_thread_ptrs = true;
- break :inner macho_file.locals.items[atom.local_sym_index].n_value;
+ const atom = macho_file.tlv_ptr_entries.items[tlv_ptr_index].atom;
+ break :blk macho_file.locals.items[atom.local_sym_index].n_value;
}
- break :inner 0;
- };
+ break :blk 0;
+ }
},
}
},
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 200ac5e568..c2d6d61066 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -12,6 +12,7 @@ const File = link.File;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
+const TypedValue = @import("../TypedValue.zig");
const std = @import("std");
const builtin = @import("builtin");
@@ -275,6 +276,14 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
return self.updateFinish(decl);
}
+pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl: *Module.Decl) !u32 {
+ _ = self;
+ _ = tv;
+ _ = decl;
+ log.debug("TODO lowerUnnamedConst for Plan9", .{});
+ return error.AnalysisFail;
+}
+
pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
if (decl.val.tag() == .extern_fn) {
return; // TODO Should we do more when front-end analyzed extern decl?
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 7871dec24b..1044742627 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -7,6 +7,7 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) return error.SkipZigTest;
comptime try expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
comptime try expect(@TypeOf(&foo) == *align(4) u8);
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index c907fb751b..d8fbc5ed9e 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -78,16 +78,19 @@ test "comptime_int @intToFloat" {
try expect(@TypeOf(result) == f64);
try expect(result == 1234.0);
}
- {
- const result = @intToFloat(f128, 1234);
- try expect(@TypeOf(result) == f128);
- try expect(result == 1234.0);
- }
- // big comptime_int (> 64 bits) to f128 conversion
- {
- const result = @intToFloat(f128, 0x1_0000_0000_0000_0000);
- try expect(@TypeOf(result) == f128);
- try expect(result == 0x1_0000_0000_0000_0000.0);
+ if (builtin.zig_backend != .stage2_x86_64 or builtin.os.tag != .macos) {
+ // TODO investigate why this traps on x86_64-macos
+ {
+ const result = @intToFloat(f128, 1234);
+ try expect(@TypeOf(result) == f128);
+ try expect(result == 1234.0);
+ }
+ // big comptime_int (> 64 bits) to f128 conversion
+ {
+ const result = @intToFloat(f128, 0x1_0000_0000_0000_0000);
+ try expect(@TypeOf(result) == f128);
+ try expect(result == 0x1_0000_0000_0000_0000.0);
+ }
}
}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index c470279aad..eb5a9e4273 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -51,6 +51,27 @@ test "non-packed struct has fields padded out to the required alignment" {
try expect(foo.fourth() == 2);
}
+const SmallStruct = struct {
+ a: u8,
+ b: u32,
+
+ fn first(self: *SmallStruct) u8 {
+ return self.a;
+ }
+
+ fn second(self: *SmallStruct) u32 {
+ return self.b;
+ }
+};
+
+test "lower unnamed constants" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+ var foo = SmallStruct{ .a = 1, .b = 255 };
+ try expect(foo.first() == 1);
+ try expect(foo.second() == 255);
+}
+
const StructWithNoFields = struct {
fn add(a: i32, b: i32) i32 {
return a + b;
diff --git a/test/stage2/x86_64.zig b/test/stage2/x86_64.zig
index da2c662833..e482d1f7c7 100644
--- a/test/stage2/x86_64.zig
+++ b/test/stage2/x86_64.zig
@@ -1844,6 +1844,94 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "");
}
+
+ {
+ var case = ctx.exe("lower unnamed constants - structs", target);
+ case.addCompareOutput(
+ \\const Foo = struct {
+ \\ a: u8,
+ \\ b: u32,
+ \\
+ \\ fn first(self: *Foo) u8 {
+ \\ return self.a;
+ \\ }
+ \\
+ \\ fn second(self: *Foo) u32 {
+ \\ return self.b;
+ \\ }
+ \\};
+ \\
+ \\pub fn main() void {
+ \\ var foo = Foo{ .a = 1, .b = 5 };
+ \\ assert(foo.first() == 1);
+ \\ assert(foo.second() == 5);
+ \\}
+ \\
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ , "");
+
+ case.addCompareOutput(
+ \\const Foo = struct {
+ \\ a: u8,
+ \\ b: u32,
+ \\
+ \\ fn first(self: *Foo) u8 {
+ \\ return self.a;
+ \\ }
+ \\
+ \\ fn second(self: *Foo) u32 {
+ \\ return self.b;
+ \\ }
+ \\};
+ \\
+ \\pub fn main() void {
+ \\ var foo = Foo{ .a = 1, .b = 5 };
+ \\ assert(foo.first() == 1);
+ \\ assert(foo.second() == 5);
+ \\
+ \\ foo.a = 10;
+ \\ foo.b = 255;
+ \\
+ \\ assert(foo.first() == 10);
+ \\ assert(foo.second() == 255);
+ \\
+ \\ var foo2 = Foo{ .a = 15, .b = 255 };
+ \\ assert(foo2.first() == 15);
+ \\ assert(foo2.second() == 255);
+ \\}
+ \\
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ , "");
+
+ case.addCompareOutput(
+ \\const Foo = struct {
+ \\ a: u8,
+ \\ b: u32,
+ \\
+ \\ fn first(self: *Foo) u8 {
+ \\ return self.a;
+ \\ }
+ \\
+ \\ fn second(self: *Foo) u32 {
+ \\ return self.b;
+ \\ }
+ \\};
+ \\
+ \\pub fn main() void {
+ \\ var foo2 = Foo{ .a = 15, .b = 255 };
+ \\ assert(foo2.first() == 15);
+ \\ assert(foo2.second() == 255);
+ \\}
+ \\
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ , "");
+ }
}
}
From 2cc33367ebee202462d6bef7f07120dd38ff6912 Mon Sep 17 00:00:00 2001
From: Jonathan Marler
Date: Mon, 7 Feb 2022 01:49:15 -0700
Subject: [PATCH 0065/2031] fix bug when ReadFile returns synchronously in
collectOutputWindows
---
lib/std/child_process.zig | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 10aeacf755..ace58ecf4f 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -270,12 +270,14 @@ pub const ChildProcess = struct {
try buf.ensureTotalCapacity(new_capacity);
const next_buf = buf.unusedCapacitySlice();
if (next_buf.len == 0) return .full;
- const read_result = windows.kernel32.ReadFile(handle, next_buf.ptr, math.cast(u32, next_buf.len) catch maxInt(u32), null, overlapped);
+ var read_bytes: u32 = undefined;
+ const read_result = windows.kernel32.ReadFile(handle, next_buf.ptr, math.cast(u32, next_buf.len) catch maxInt(u32), &read_bytes, overlapped);
if (read_result == 0) return switch (windows.kernel32.GetLastError()) {
.IO_PENDING => .pending,
.BROKEN_PIPE => .closed,
else => |err| windows.unexpectedError(err),
};
+ buf.items.len += read_bytes;
}
}
From 435beb4e1dec117136ef9541530ebd3e70c2319d Mon Sep 17 00:00:00 2001
From: boofexxx
Date: Mon, 7 Feb 2022 13:11:25 +0500
Subject: [PATCH 0066/2031] std: fix doc comment typo in os.zig
---
lib/std/os.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 6d59080820..a935cfd8c0 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -993,7 +993,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
-/// return error.WouldBlock when EAGAIN is received.k`.
+/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
From e382e7be2b7a4c40a8db78dca0de38141dad8c67 Mon Sep 17 00:00:00 2001
From: Evan Haas
Date: Sat, 5 Feb 2022 22:18:31 -0800
Subject: [PATCH 0067/2031] std: Allow `mem.zeroes` to work at comptime with
extern union
Fixes #10797
---
lib/std/mem.zig | 7 ++++---
test/run_translated_c.zig | 10 ++++++++++
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 71de42aad7..5ca7faf90b 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -309,9 +309,7 @@ pub fn zeroes(comptime T: type) T {
if (comptime meta.containerLayout(T) == .Extern) {
// The C language specification states that (global) unions
// should be zero initialized to the first named member.
- var item: T = undefined;
- @field(item, info.fields[0].name) = zeroes(@TypeOf(@field(item, info.fields[0].name)));
- return item;
+ return @unionInit(T, info.fields[0].name, zeroes(info.fields[0].field_type));
}
@compileError("Can't set a " ++ @typeName(T) ++ " to zero.");
@@ -417,6 +415,9 @@ test "mem.zeroes" {
var c = zeroes(C_union);
try testing.expectEqual(@as(u8, 0), c.a);
+
+ comptime var comptime_union = zeroes(C_union);
+ try testing.expectEqual(@as(u8, 0), comptime_union.a);
}
/// Initializes all fields of the struct with their default value, or zero values if no default value is present.
diff --git a/test/run_translated_c.zig b/test/run_translated_c.zig
index 80e889b44c..933cf54c4f 100644
--- a/test/run_translated_c.zig
+++ b/test/run_translated_c.zig
@@ -1819,4 +1819,14 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\ return 0;
\\}
, "");
+
+ cases.add("Zero-initialization of global union. Issue #10797",
+ \\#include
+ \\union U { int x; double y; };
+ \\union U u;
+ \\int main(void) {
+ \\ if (u.x != 0) abort();
+ \\ return 0;
+ \\}
+ , "");
}
From 3db130ff3d8175adce610f7805a149810cf7989d Mon Sep 17 00:00:00 2001
From: matu3ba
Date: Mon, 7 Feb 2022 19:27:21 +0100
Subject: [PATCH 0068/2031] compiler_rt: add addo (#10824)
- approach by Hacker's Delight with wrapping addition
- ca. 1.10x perf over the standard approach on my laptop
- tests with all combinations of min,max with -1,0,+1 and combinations of
sequences +-1,2,4..,max
---
lib/std/special/compiler_rt.zig | 6 ++
lib/std/special/compiler_rt/addo.zig | 38 ++++++++++
lib/std/special/compiler_rt/addodi4_test.zig | 77 +++++++++++++++++++
lib/std/special/compiler_rt/addosi4_test.zig | 78 ++++++++++++++++++++
lib/std/special/compiler_rt/addoti4_test.zig | 77 +++++++++++++++++++
lib/std/special/compiler_rt/mulo.zig | 2 +-
6 files changed, 277 insertions(+), 1 deletion(-)
create mode 100644 lib/std/special/compiler_rt/addo.zig
create mode 100644 lib/std/special/compiler_rt/addodi4_test.zig
create mode 100644 lib/std/special/compiler_rt/addosi4_test.zig
create mode 100644 lib/std/special/compiler_rt/addoti4_test.zig
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index da21745cce..286237aa7b 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -106,6 +106,12 @@ comptime {
}
// Integral arithmetic which returns if overflow
+ const __addosi4 = @import("compiler_rt/addo.zig").__addosi4;
+ @export(__addosi4, .{ .name = "__addosi4", .linkage = linkage });
+ const __addodi4 = @import("compiler_rt/addo.zig").__addodi4;
+ @export(__addodi4, .{ .name = "__addodi4", .linkage = linkage });
+ const __addoti4 = @import("compiler_rt/addo.zig").__addoti4;
+ @export(__addoti4, .{ .name = "__addoti4", .linkage = linkage });
const __mulosi4 = @import("compiler_rt/mulo.zig").__mulosi4;
@export(__mulosi4, .{ .name = "__mulosi4", .linkage = linkage });
const __mulodi4 = @import("compiler_rt/mulo.zig").__mulodi4;
diff --git a/lib/std/special/compiler_rt/addo.zig b/lib/std/special/compiler_rt/addo.zig
new file mode 100644
index 0000000000..966c74cb8e
--- /dev/null
+++ b/lib/std/special/compiler_rt/addo.zig
@@ -0,0 +1,38 @@
+const builtin = @import("builtin");
+
+// addo - add overflow
+// * return a+%b.
+// * return if a+b overflows => 1 else => 0
+// - addoXi4_generic as default
+
+inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+ @setRuntimeSafety(builtin.is_test);
+ overflow.* = 0;
+ var sum: ST = a +% b;
+ // Hacker's Delight: section Overflow Detection, subsection Signed Add/Subtract
+ // Let sum = a +% b == a + b + carry == wraparound addition.
+ // Overflow in a+b+carry occurs, iff a and b have opposite signs
+ // and the sign of a+b+carry is the same as a (or equivalently b).
+ // Slower routine: res = ~(a ^ b) & (sum ^ a)
+ // Faster routine: res = (sum ^ a) & (sum ^ b)
+ // Overflow occurred, iff (res < 0)
+ if (((sum ^ a) & (sum ^ b)) < 0)
+ overflow.* = 1;
+ return sum;
+}
+
+pub fn __addosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+ return addoXi4_generic(i32, a, b, overflow);
+}
+pub fn __addodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+ return addoXi4_generic(i64, a, b, overflow);
+}
+pub fn __addoti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+ return addoXi4_generic(i128, a, b, overflow);
+}
+
+test {
+ _ = @import("addosi4_test.zig");
+ _ = @import("addodi4_test.zig");
+ _ = @import("addoti4_test.zig");
+}
diff --git a/lib/std/special/compiler_rt/addodi4_test.zig b/lib/std/special/compiler_rt/addodi4_test.zig
new file mode 100644
index 0000000000..f70a80a5b2
--- /dev/null
+++ b/lib/std/special/compiler_rt/addodi4_test.zig
@@ -0,0 +1,77 @@
+const addv = @import("addo.zig");
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+fn test__addodi4(a: i64, b: i64) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = addv.__addodi4(a, b, &result_ov);
+ var expected: i64 = simple_addodi4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+fn simple_addodi4(a: i64, b: i64, overflow: *c_int) i64 {
+ overflow.* = 0;
+ const min: i64 = math.minInt(i64);
+ const max: i64 = math.maxInt(i64);
+ if (((a > 0) and (b > max - a)) or
+ ((a < 0) and (b < min - a)))
+ overflow.* = 1;
+ return a +% b;
+}
+
+test "addodi4" {
+ const min: i64 = math.minInt(i64);
+ const max: i64 = math.maxInt(i64);
+ var i: i64 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__addodi4(i, i);
+ try test__addodi4(-i, -i);
+ try test__addodi4(i, -i);
+ try test__addodi4(-i, i);
+ }
+
+ // edge cases
+ // 0 + 0 = 0
+ // MIN + MIN overflow
+ // MAX + MAX overflow
+ // 0 + MIN MIN
+ // 0 + MAX MAX
+ // MIN + 0 MIN
+ // MAX + 0 MAX
+ // MIN + MAX -1
+ // MAX + MIN -1
+ try test__addodi4(0, 0);
+ try test__addodi4(min, min);
+ try test__addodi4(max, max);
+ try test__addodi4(0, min);
+ try test__addodi4(0, max);
+ try test__addodi4(min, 0);
+ try test__addodi4(max, 0);
+ try test__addodi4(min, max);
+ try test__addodi4(max, min);
+
+ // derived edge cases
+ // MIN+1 + MIN overflow
+ // MAX-1 + MAX overflow
+ // 1 + MIN = MIN+1
+ // -1 + MIN overflow
+ // -1 + MAX = MAX-1
+ // +1 + MAX overflow
+ // MIN + 1 = MIN+1
+ // MIN + -1 overflow
+ // MAX + 1 overflow
+ // MAX + -1 = MAX-1
+ try test__addodi4(min + 1, min);
+ try test__addodi4(max - 1, max);
+ try test__addodi4(1, min);
+ try test__addodi4(-1, min);
+ try test__addodi4(-1, max);
+ try test__addodi4(1, max);
+ try test__addodi4(min, 1);
+ try test__addodi4(min, -1);
+ try test__addodi4(max, -1);
+ try test__addodi4(max, 1);
+}
diff --git a/lib/std/special/compiler_rt/addosi4_test.zig b/lib/std/special/compiler_rt/addosi4_test.zig
new file mode 100644
index 0000000000..a8f81d70d1
--- /dev/null
+++ b/lib/std/special/compiler_rt/addosi4_test.zig
@@ -0,0 +1,78 @@
+const addv = @import("addo.zig");
+const testing = @import("std").testing;
+
+fn test__addosi4(a: i32, b: i32) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = addv.__addosi4(a, b, &result_ov);
+ var expected: i32 = simple_addosi4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+fn simple_addosi4(a: i32, b: i32, overflow: *c_int) i32 {
+ overflow.* = 0;
+ const min: i32 = -2147483648;
+ const max: i32 = 2147483647;
+ if (((a > 0) and (b > max - a)) or
+ ((a < 0) and (b < min - a)))
+ overflow.* = 1;
+ return a +% b;
+}
+
+test "addosi4" {
+ // -2^31 <= i32 <= 2^31-1
+ // 2^31 = 2147483648
+ // 2^31-1 = 2147483647
+ const min: i32 = -2147483648;
+ const max: i32 = 2147483647;
+ var i: i32 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__addosi4(i, i);
+ try test__addosi4(-i, -i);
+ try test__addosi4(i, -i);
+ try test__addosi4(-i, i);
+ }
+
+ // edge cases
+ // 0 + 0 = 0
+ // MIN + MIN overflow
+ // MAX + MAX overflow
+ // 0 + MIN MIN
+ // 0 + MAX MAX
+ // MIN + 0 MIN
+ // MAX + 0 MAX
+ // MIN + MAX -1
+ // MAX + MIN -1
+ try test__addosi4(0, 0);
+ try test__addosi4(min, min);
+ try test__addosi4(max, max);
+ try test__addosi4(0, min);
+ try test__addosi4(0, max);
+ try test__addosi4(min, 0);
+ try test__addosi4(max, 0);
+ try test__addosi4(min, max);
+ try test__addosi4(max, min);
+
+ // derived edge cases
+ // MIN+1 + MIN overflow
+ // MAX-1 + MAX overflow
+ // 1 + MIN = MIN+1
+ // -1 + MIN overflow
+ // -1 + MAX = MAX-1
+ // +1 + MAX overflow
+ // MIN + 1 = MIN+1
+ // MIN + -1 overflow
+ // MAX + 1 overflow
+ // MAX + -1 = MAX-1
+ try test__addosi4(min + 1, min);
+ try test__addosi4(max - 1, max);
+ try test__addosi4(1, min);
+ try test__addosi4(-1, min);
+ try test__addosi4(-1, max);
+ try test__addosi4(1, max);
+ try test__addosi4(min, 1);
+ try test__addosi4(min, -1);
+ try test__addosi4(max, -1);
+ try test__addosi4(max, 1);
+}
diff --git a/lib/std/special/compiler_rt/addoti4_test.zig b/lib/std/special/compiler_rt/addoti4_test.zig
new file mode 100644
index 0000000000..dd0f4e3d3c
--- /dev/null
+++ b/lib/std/special/compiler_rt/addoti4_test.zig
@@ -0,0 +1,77 @@
+const addv = @import("addo.zig");
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+fn test__addoti4(a: i128, b: i128) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = addv.__addoti4(a, b, &result_ov);
+ var expected: i128 = simple_addoti4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
+ overflow.* = 0;
+ const min: i128 = math.minInt(i128);
+ const max: i128 = math.maxInt(i128);
+ if (((a > 0) and (b > max - a)) or
+ ((a < 0) and (b < min - a)))
+ overflow.* = 1;
+ return a +% b;
+}
+
+test "addoti4" {
+ const min: i128 = math.minInt(i128);
+ const max: i128 = math.maxInt(i128);
+ var i: i128 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__addoti4(i, i);
+ try test__addoti4(-i, -i);
+ try test__addoti4(i, -i);
+ try test__addoti4(-i, i);
+ }
+
+ // edge cases
+ // 0 + 0 = 0
+ // MIN + MIN overflow
+ // MAX + MAX overflow
+ // 0 + MIN MIN
+ // 0 + MAX MAX
+ // MIN + 0 MIN
+ // MAX + 0 MAX
+ // MIN + MAX -1
+ // MAX + MIN -1
+ try test__addoti4(0, 0);
+ try test__addoti4(min, min);
+ try test__addoti4(max, max);
+ try test__addoti4(0, min);
+ try test__addoti4(0, max);
+ try test__addoti4(min, 0);
+ try test__addoti4(max, 0);
+ try test__addoti4(min, max);
+ try test__addoti4(max, min);
+
+ // derived edge cases
+ // MIN+1 + MIN overflow
+ // MAX-1 + MAX overflow
+ // 1 + MIN = MIN+1
+ // -1 + MIN overflow
+ // -1 + MAX = MAX-1
+ // +1 + MAX overflow
+ // MIN + 1 = MIN+1
+ // MIN + -1 overflow
+ // MAX + 1 overflow
+ // MAX + -1 = MAX-1
+ try test__addoti4(min + 1, min);
+ try test__addoti4(max - 1, max);
+ try test__addoti4(1, min);
+ try test__addoti4(-1, min);
+ try test__addoti4(-1, max);
+ try test__addoti4(1, max);
+ try test__addoti4(min, 1);
+ try test__addoti4(min, -1);
+ try test__addoti4(max, -1);
+ try test__addoti4(max, 1);
+}
diff --git a/lib/std/special/compiler_rt/mulo.zig b/lib/std/special/compiler_rt/mulo.zig
index df4c98134c..78590e5ce1 100644
--- a/lib/std/special/compiler_rt/mulo.zig
+++ b/lib/std/special/compiler_rt/mulo.zig
@@ -3,7 +3,7 @@ const std = @import("std");
const math = std.math;
// mulo - multiplication overflow
-// * return a*b.
+// * return a*%b.
// * return if a*b overflows => 1 else => 0
// - muloXi4_genericSmall as default
// - muloXi4_genericFast for 2*bitsize <= usize
From 0a7801236cf3601ff20b3c1c16cd27c1d089157e Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 7 Feb 2022 15:29:14 +0100
Subject: [PATCH 0069/2031] stage2,arm: add lowering of unnamed consts
* implement `struct_field_ptr` when `MCValue == .stack_argument_offset`
* enable simple `struct` test for ARM
---
src/arch/arm/CodeGen.zig | 46 +++++++++++++++++++++++++++++++++++++++-
test/behavior/struct.zig | 6 ++----
2 files changed, 47 insertions(+), 5 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 804aedb6cc..1859ce874f 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1643,7 +1643,8 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
- const struct_ty = self.air.typeOf(operand).childType();
+ const ptr_ty = self.air.typeOf(operand);
+ const struct_ty = ptr_ty.childType();
const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
const struct_field_ty = struct_ty.structFieldType(index);
@@ -1652,6 +1653,28 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size };
},
+ .stack_argument_offset => {
+ const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
+ .immediate = struct_field_offset,
+ });
+ self.register_manager.freezeRegs(&.{offset_reg});
+ defer self.register_manager.unfreezeRegs(&.{offset_reg});
+
+ const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
+ self.register_manager.freezeRegs(&.{addr_reg});
+ defer self.register_manager.unfreezeRegs(&.{addr_reg});
+
+ const dst_reg = try self.register_manager.allocReg(inst);
+ try self.genBinOpCode(
+ dst_reg,
+ .{ .register = addr_reg },
+ .{ .register = offset_reg },
+ false,
+ .add,
+ .unsigned,
+ );
+ break :result MCValue{ .register = dst_reg };
+ },
else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
}
};
@@ -3841,6 +3864,24 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
_ = tv;
}
+fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
+ const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
+ return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
+ };
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
+ return MCValue{ .memory = vaddr };
+ } else if (self.bin_file.cast(link.File.MachO)) |_| {
+ unreachable;
+ } else if (self.bin_file.cast(link.File.Coff)) |_| {
+ return self.fail("TODO lower unnamed const in COFF", .{});
+ } else if (self.bin_file.cast(link.File.Plan9)) |_| {
+ return self.fail("TODO lower unnamed const in Plan9", .{});
+ } else {
+ return self.fail("TODO lower unnamed const", .{});
+ }
+}
+
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
@@ -3953,6 +3994,9 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty});
}
},
+ .Struct => {
+ return self.lowerUnnamedConst(typed_value);
+ },
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
}
}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index eb5a9e4273..5cf3776889 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -53,20 +53,18 @@ test "non-packed struct has fields padded out to the required alignment" {
const SmallStruct = struct {
a: u8,
- b: u32,
+ b: u8,
fn first(self: *SmallStruct) u8 {
return self.a;
}
- fn second(self: *SmallStruct) u32 {
+ fn second(self: *SmallStruct) u8 {
return self.b;
}
};
test "lower unnamed constants" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
var foo = SmallStruct{ .a = 1, .b = 255 };
try expect(foo.first() == 1);
try expect(foo.second() == 255);
From bbd7969c87e03c9312eeafd9940ef8287a3ad689 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 7 Feb 2022 16:02:43 +0100
Subject: [PATCH 0070/2031] stage2,x64: implement genSetStack for memory
operand
---
src/arch/x86_64/CodeGen.zig | 43 ++++++++++++++++++++++++++++++++-----
1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index a60b8c78f0..7baf74c9c2 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -3487,6 +3487,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
}
fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerError!void {
+ const abi_size = ty.abiSize(self.target.*);
switch (mcv) {
.dead => unreachable,
.ptr_embedded_in_code => unreachable,
@@ -3510,7 +3511,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
return self.genSetStack(ty, stack_offset, .{ .register = reg });
},
.immediate => |x_big| {
- const abi_size = ty.abiSize(self.target.*);
const adj_off = stack_offset + @intCast(i32, abi_size);
if (adj_off > 128) {
return self.fail("TODO implement set stack variable with large stack offset", .{});
@@ -3583,7 +3583,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
if (stack_offset > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- const abi_size = ty.abiSize(self.target.*);
const adj_off = stack_offset + @intCast(i32, abi_size);
_ = try self.addInst(.{
.tag = .mov,
@@ -3600,11 +3599,46 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
.got_load,
.direct_load,
=> {
- if (ty.abiSize(self.target.*) <= 8) {
+ if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
}
- return self.fail("TODO implement memcpy for setting stack from {}", .{mcv});
+
+ try self.register_manager.getReg(.rax, null);
+ try self.register_manager.getReg(.rcx, null);
+
+ self.register_manager.freezeRegs(&.{ .rax, .rcx, .rbp });
+ defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx, .rbp });
+
+ const addr_reg: Register = blk: {
+ switch (mcv) {
+ .memory => |addr| {
+ const reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr });
+ break :blk reg;
+ },
+ else => {
+ return self.fail("TODO implement memcpy for setting stack from {}", .{mcv});
+ },
+ }
+ };
+
+ self.register_manager.freezeRegs(&.{addr_reg});
+ defer self.register_manager.unfreezeRegs(&.{addr_reg});
+
+ const regs = try self.register_manager.allocRegs(2, .{ null, null });
+ const count_reg = regs[0];
+ const tmp_reg = regs[1];
+
+ // TODO allow for abi_size to be u64
+ try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) });
+
+ return self.genInlineMemcpy(
+ -(stack_offset + @intCast(i32, abi_size)),
+ .rbp,
+ addr_reg.to64(),
+ count_reg.to64(),
+ tmp_reg.to8(),
+ );
},
.ptr_stack_offset => {
const reg = try self.copyToTmpRegister(ty, mcv);
@@ -3616,7 +3650,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
return;
}
- const abi_size = ty.abiSize(self.target.*);
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
From ac36fe71147a78e5f9299428eee776efcc9e4afb Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 7 Feb 2022 16:23:37 +0100
Subject: [PATCH 0071/2031] stage2,x64: fix registerAlias helper function
---
src/arch/x86_64/CodeGen.zig | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 7baf74c9c2..1aac404346 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -4692,11 +4692,17 @@ fn parseRegName(name: []const u8) ?Register {
fn registerAlias(reg: Register, size_bytes: u32) Register {
// For x86_64 we have to pick a smaller register alias depending on abi size.
- switch (size_bytes) {
- 1 => return reg.to8(),
- 2 => return reg.to16(),
- 4 => return reg.to32(),
- 8 => return reg.to64(),
- else => unreachable,
+ if (size_bytes == 0) {
+ unreachable; // should be comptime known
+ } else if (size_bytes <= 1) {
+ return reg.to8();
+ } else if (size_bytes <= 2) {
+ return reg.to16();
+ } else if (size_bytes <= 4) {
+ return reg.to32();
+ } else if (size_bytes <= 8) {
+ return reg.to64();
+ } else {
+ unreachable; // TODO handle floating-point registers
}
}
From 7a9b9df80e9799421393f476ea10332ba35ec258 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 7 Feb 2022 17:08:33 +0100
Subject: [PATCH 0072/2031] stage2,x64: impl masking reg for struct_field_val
---
src/arch/x86_64/CodeGen.zig | 27 +++++++++++++++++++++++----
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 1aac404346..e2ae339bae 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2025,16 +2025,35 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
- const struct_size = @intCast(i32, struct_ty.abiSize(self.target.*));
- const struct_field_offset = @intCast(i32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_size = struct_ty.abiSize(self.target.*);
+ const struct_field_offset = struct_ty.structFieldOffset(index, self.target.*);
const struct_field_ty = struct_ty.structFieldType(index);
- const struct_field_size = @intCast(i32, struct_field_ty.abiSize(self.target.*));
+ const struct_field_size = struct_field_ty.abiSize(self.target.*);
switch (mcv) {
.stack_offset => |off| {
- const stack_offset = off + struct_size - struct_field_offset - struct_field_size;
+ const offset_to_field = struct_size - struct_field_offset - struct_field_size;
+ const stack_offset = off + @intCast(i32, offset_to_field);
break :result MCValue{ .stack_offset = stack_offset };
},
+ .register => |reg| {
+ // 1. Shift by struct_field_offset.
+ // 2. Mask with reg.size() - struct_field_size
+ // 3. Return in register
+
+ // TODO check if register can be re-used
+ self.register_manager.freezeRegs(&.{reg});
+ defer self.register_manager.unfreezeRegs(&.{reg});
+ const dst_mcv = try self.copyToNewRegister(inst, Type.usize, .{ .register = reg.to64() });
+
+ // TODO shift here
+
+ const mask_shift = @intCast(u6, (64 - struct_field_ty.bitSize(self.target.*)));
+ const mask = (~@as(u64, 0)) >> mask_shift;
+ try self.genBinMathOpMir(.@"and", Type.usize, dst_mcv, .{ .immediate = mask });
+
+ break :result dst_mcv;
+ },
else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
}
};
From becbf446d3d33fd73a7c1567e89a98a9b191d4e8 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 7 Feb 2022 20:11:42 +0100
Subject: [PATCH 0073/2031] stage2,x64: impl lowering of shift ops in Emit
---
src/arch/x86_64/CodeGen.zig | 39 +++++++--
src/arch/x86_64/Emit.zig | 156 ++++++++++++++++++++++++++++++++----
src/arch/x86_64/Mir.zig | 55 +++++++------
src/codegen.zig | 1 -
4 files changed, 203 insertions(+), 48 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e2ae339bae..2642d5ce15 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2037,17 +2037,42 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .stack_offset = stack_offset };
},
.register => |reg| {
- // 1. Shift by struct_field_offset.
- // 2. Mask with reg.size() - struct_field_size
- // 3. Return in register
-
- // TODO check if register can be re-used
self.register_manager.freezeRegs(&.{reg});
defer self.register_manager.unfreezeRegs(&.{reg});
- const dst_mcv = try self.copyToNewRegister(inst, Type.usize, .{ .register = reg.to64() });
- // TODO shift here
+ const dst_mcv = blk: {
+ if (self.reuseOperand(inst, operand, 0, mcv)) {
+ break :blk mcv;
+ } else {
+ const dst_mcv = try self.copyToNewRegister(inst, Type.usize, .{ .register = reg.to64() });
+ break :blk dst_mcv;
+ }
+ };
+ // Shift by struct_field_offset.
+ const shift_amount = @intCast(u8, struct_field_offset * 8);
+ if (shift_amount > 0) {
+ if (shift_amount == 1) {
+ _ = try self.addInst(.{
+ .tag = .shr,
+ .ops = (Mir.Ops{
+ .reg1 = dst_mcv.register,
+ }).encode(),
+ .data = undefined,
+ });
+ } else {
+ _ = try self.addInst(.{
+ .tag = .shr,
+ .ops = (Mir.Ops{
+ .reg1 = dst_mcv.register,
+ .flags = 0b10,
+ }).encode(),
+ .data = .{ .imm = shift_amount },
+ });
+ }
+ }
+
+ // Mask with reg.size() - struct_field_size
const mask_shift = @intCast(u6, (64 - struct_field_ty.bitSize(self.target.*)));
const mask = (~@as(u64, 0)) >> mask_shift;
try self.genBinMathOpMir(.@"and", Type.usize, dst_mcv, .{ .immediate = mask });
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index be26354402..3f221f0f19 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -133,6 +133,11 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.lea => try emit.mirLea(inst),
.lea_pie => try emit.mirLeaPie(inst),
+ .shl => try emit.mirShift(.shl, inst),
+ .sal => try emit.mirShift(.sal, inst),
+ .shr => try emit.mirShift(.shr, inst),
+ .sar => try emit.mirShift(.sar, inst),
+
.imul_complex => try emit.mirIMulComplex(inst),
.push => try emit.mirPushPop(.push, inst),
@@ -653,6 +658,31 @@ fn mirMovabs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
return lowerToFdEnc(.mov, ops.reg1, imm, emit.code);
}
+fn mirShift(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
+ const ops = Mir.Ops.decode(emit.mir.instructions.items(.ops)[inst]);
+ switch (ops.flags) {
+ 0b00 => {
+ // sal reg1, 1
+ // M1
+ return lowerToM1Enc(tag, RegisterOrMemory.reg(ops.reg1), emit.code);
+ },
+ 0b01 => {
+ // sal reg1, .cl
+ // MC
+ return lowerToMcEnc(tag, RegisterOrMemory.reg(ops.reg1), emit.code);
+ },
+ 0b10 => {
+ // sal reg1, imm8
+ // MI
+ const imm = @truncate(u8, emit.mir.instructions.items(.data)[inst].imm);
+ return lowerToMiImm8Enc(tag, RegisterOrMemory.reg(ops.reg1), imm, emit.code);
+ },
+ 0b11 => {
+ return emit.fail("TODO unused variant: SHIFT reg1, 0b11", .{});
+ },
+ }
+}
+
fn mirIMulComplex(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .imul_complex);
@@ -743,13 +773,13 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
emit.code,
);
const end_offset = emit.code.items.len;
- const reloc_type = switch (ops.flags) {
- 0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
- 0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
- else => return emit.fail("TODO unused LEA PIE variants 0b10 and 0b11", .{}),
- };
const sym_index = emit.mir.instructions.items(.data)[inst].linker_sym_index;
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
+ const reloc_type = switch (ops.flags) {
+ 0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
+ 0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
+ else => return emit.fail("TODO unused LEA PIE variants 0b10 and 0b11", .{}),
+ };
const decl = macho_file.active_decl.?;
try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
.offset = @intCast(u32, end_offset - 4),
@@ -1064,6 +1094,10 @@ const Tag = enum {
setng,
setnle,
setg,
+ shl,
+ sal,
+ shr,
+ sar,
fn isSetCC(tag: Tag) bool {
return switch (tag) {
@@ -1119,9 +1153,18 @@ const Encoding = enum {
/// OP imm32
i,
+ /// OP r/m64, 1
+ m1,
+
+ /// OP r/m64, .cl
+ mc,
+
/// OP r/m64, imm32
mi,
+ /// OP r/m64, imm8
+ mi8,
+
/// OP r/m64, r64
mr,
@@ -1230,12 +1273,25 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) ?OpCode {
.ret_far => OpCode.oneByte(0xca),
else => null,
},
+ .m1 => return switch (tag) {
+ .shl, .sal, .shr, .sar => OpCode.oneByte(if (is_one_byte) 0xd0 else 0xd1),
+ else => null,
+ },
+ .mc => return switch (tag) {
+ .shl, .sal, .shr, .sar => OpCode.oneByte(if (is_one_byte) 0xd2 else 0xd3),
+ else => null,
+ },
.mi => return switch (tag) {
.adc, .add, .sub, .xor, .@"and", .@"or", .sbb, .cmp => OpCode.oneByte(if (is_one_byte) 0x80 else 0x81),
.mov => OpCode.oneByte(if (is_one_byte) 0xc6 else 0xc7),
.@"test" => OpCode.oneByte(if (is_one_byte) 0xf6 else 0xf7),
else => null,
},
+ .mi8 => return switch (tag) {
+ .adc, .add, .sub, .xor, .@"and", .@"or", .sbb, .cmp => OpCode.oneByte(0x83),
+ .shl, .sal, .shr, .sar => OpCode.oneByte(if (is_one_byte) 0xc0 else 0xc1),
+ else => null,
+ },
.mr => return switch (tag) {
.adc => OpCode.oneByte(if (is_one_byte) 0x10 else 0x11),
.add => OpCode.oneByte(if (is_one_byte) 0x00 else 0x01),
@@ -1331,6 +1387,11 @@ inline fn getModRmExt(tag: Tag) ?u3 {
.setnle,
.setg,
=> 0x0,
+ .shl,
+ .sal,
+ => 0x4,
+ .shr => 0x5,
+ .sar => 0x7,
else => null,
};
}
@@ -1528,8 +1589,8 @@ fn lowerToDEnc(tag: Tag, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
encoder.imm32(@bitCast(i32, imm));
}
-fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8)) InnerError!void {
- const opc = getOpCode(tag, .m, false).?;
+fn lowerToMxEnc(tag: Tag, reg_or_mem: RegisterOrMemory, enc: Encoding, code: *std.ArrayList(u8)) InnerError!void {
+ const opc = getOpCode(tag, enc, reg_or_mem.size() == 8).?;
const modrm_ext = getModRmExt(tag).?;
switch (reg_or_mem) {
.register => |reg| {
@@ -1537,11 +1598,9 @@ fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8))
if (reg.size() == 16) {
encoder.prefix16BitMode();
}
+ const wide = if (tag == .jmp_near) false else setRexWRegister(reg);
encoder.rex(.{
- .w = switch (reg) {
- .ah, .bh, .ch, .dh => true,
- else => false,
- },
+ .w = wide,
.b = reg.isExtended(),
});
opc.encode(encoder);
@@ -1553,8 +1612,9 @@ fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8))
encoder.prefix16BitMode();
}
if (mem_op.base) |base| {
+ const wide = if (tag == .jmp_near) false else mem_op.ptr_size == .qword_ptr;
encoder.rex(.{
- .w = false,
+ .w = wide,
.b = base.isExtended(),
});
}
@@ -1564,6 +1624,18 @@ fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8))
}
}
+fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8)) InnerError!void {
+ return lowerToMxEnc(tag, reg_or_mem, .m, code);
+}
+
+fn lowerToM1Enc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8)) InnerError!void {
+ return lowerToMxEnc(tag, reg_or_mem, .m1, code);
+}
+
+fn lowerToMcEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8)) InnerError!void {
+ return lowerToMxEnc(tag, reg_or_mem, .mc, code);
+}
+
fn lowerToTdEnc(tag: Tag, moffs: u64, reg: Register, code: *std.ArrayList(u8)) InnerError!void {
return lowerToTdFdEnc(tag, reg, moffs, code, true);
}
@@ -1614,9 +1686,15 @@ fn lowerToOiEnc(tag: Tag, reg: Register, imm: u64, code: *std.ArrayList(u8)) Inn
}
}
-fn lowerToMiEnc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
+fn lowerToMiXEnc(
+ tag: Tag,
+ reg_or_mem: RegisterOrMemory,
+ imm: u32,
+ enc: Encoding,
+ code: *std.ArrayList(u8),
+) InnerError!void {
const modrm_ext = getModRmExt(tag).?;
- const opc = getOpCode(tag, .mi, reg_or_mem.size() == 8).?;
+ const opc = getOpCode(tag, enc, reg_or_mem.size() == 8).?;
switch (reg_or_mem) {
.register => |dst_reg| {
const encoder = try Encoder.init(code, 7);
@@ -1632,7 +1710,7 @@ fn lowerToMiEnc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: u32, code: *std.Arr
});
opc.encode(encoder);
encoder.modRm_direct(modrm_ext, dst_reg.lowId());
- encodeImm(encoder, imm, dst_reg.size());
+ encodeImm(encoder, imm, if (enc == .mi8) 8 else dst_reg.size());
},
.memory => |dst_mem| {
const encoder = try Encoder.init(code, 12);
@@ -1651,11 +1729,19 @@ fn lowerToMiEnc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: u32, code: *std.Arr
}
opc.encode(encoder);
dst_mem.encode(encoder, modrm_ext);
- encodeImm(encoder, imm, dst_mem.ptr_size.size());
+ encodeImm(encoder, imm, if (enc == .mi8) 8 else dst_mem.ptr_size.size());
},
}
}
+fn lowerToMiImm8Enc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: u8, code: *std.ArrayList(u8)) InnerError!void {
+ return lowerToMiXEnc(tag, reg_or_mem, imm, .mi8, code);
+}
+
+fn lowerToMiEnc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
+ return lowerToMiXEnc(tag, reg_or_mem, imm, .mi, code);
+}
+
fn lowerToRmEnc(
tag: Tag,
reg: Register,
@@ -1902,6 +1988,9 @@ test "lower MI encoding" {
emit.lowered(),
"mov qword ptr [rcx*2 + 0x10000000], 0x10",
);
+
+ try lowerToMiImm8Enc(.add, RegisterOrMemory.reg(.rax), 0x10, emit.code());
+ try expectEqualHexStrings("\x48\x83\xC0\x10", emit.lowered(), "add rax, 0x10");
}
test "lower RM encoding" {
@@ -2100,6 +2189,41 @@ test "lower M encoding" {
try expectEqualHexStrings("\x41\x0F\x97\xC3", emit.lowered(), "seta r11b");
}
+test "lower M1 and MC encodings" {
+ var emit = TestEmit.init();
+ defer emit.deinit();
+ try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12), emit.code());
+ try expectEqualHexStrings("\x49\xD1\xE4", emit.lowered(), "sal r12, 1");
+ try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12d), emit.code());
+ try expectEqualHexStrings("\x41\xD1\xE4", emit.lowered(), "sal r12d, 1");
+ try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12w), emit.code());
+ try expectEqualHexStrings("\x66\x41\xD1\xE4", emit.lowered(), "sal r12w, 1");
+ try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12b), emit.code());
+ try expectEqualHexStrings("\x41\xD0\xE4", emit.lowered(), "sal r12b, 1");
+ try lowerToM1Enc(.sal, RegisterOrMemory.reg(.rax), emit.code());
+ try expectEqualHexStrings("\x48\xD1\xE0", emit.lowered(), "sal rax, 1");
+ try lowerToM1Enc(.sal, RegisterOrMemory.reg(.eax), emit.code());
+ try expectEqualHexStrings("\xD1\xE0", emit.lowered(), "sal eax, 1");
+ try lowerToM1Enc(.sal, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = @bitCast(u32, @as(i32, -0x10)),
+ .base = .rbp,
+ }), emit.code());
+ try expectEqualHexStrings("\x48\xD1\x65\xF0", emit.lowered(), "sal qword ptr [rbp - 0x10], 1");
+ try lowerToM1Enc(.sal, RegisterOrMemory.mem(.dword_ptr, .{
+ .disp = @bitCast(u32, @as(i32, -0x10)),
+ .base = .rbp,
+ }), emit.code());
+ try expectEqualHexStrings("\xD1\x65\xF0", emit.lowered(), "sal dword ptr [rbp - 0x10], 1");
+
+ try lowerToMcEnc(.shr, RegisterOrMemory.reg(.r12), emit.code());
+ try expectEqualHexStrings("\x49\xD3\xEC", emit.lowered(), "shr r12, cl");
+ try lowerToMcEnc(.shr, RegisterOrMemory.reg(.rax), emit.code());
+ try expectEqualHexStrings("\x48\xD3\xE8", emit.lowered(), "shr rax, cl");
+
+ try lowerToMcEnc(.sar, RegisterOrMemory.reg(.rsi), emit.code());
+ try expectEqualHexStrings("\x48\xD3\xFE", emit.lowered(), "sar rsi, cl");
+}
+
test "lower O encoding" {
var emit = TestEmit.init();
defer emit.deinit();
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 2e8a9cf332..aaabcab04d 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -142,30 +142,6 @@ pub const Inst = struct {
rcr_scale_dst,
rcr_scale_imm,
rcr_mem_index_imm,
- shl,
- shl_mem_imm,
- shl_scale_src,
- shl_scale_dst,
- shl_scale_imm,
- shl_mem_index_imm,
- sal,
- sal_mem_imm,
- sal_scale_src,
- sal_scale_dst,
- sal_scale_imm,
- sal_mem_index_imm,
- shr,
- shr_mem_imm,
- shr_scale_src,
- shr_scale_dst,
- shr_scale_imm,
- shr_mem_index_imm,
- sar,
- sar_mem_imm,
- sar_scale_src,
- sar_scale_dst,
- sar_scale_imm,
- sar_mem_index_imm,
sbb,
sbb_mem_imm,
sbb_scale_src,
@@ -212,6 +188,37 @@ pub const Inst = struct {
/// * `Data` contains `linker_sym_index`
lea_pie,
+ /// ops flags: form:
+ /// 0b00 reg1, 1
+ /// 0b01 reg1, .cl
+ /// 0b10 reg1, imm8
+ /// Notes:
+ /// * If flags == 0b10, uses `imm`.
+ shl,
+ shl_mem_imm,
+ shl_scale_src,
+ shl_scale_dst,
+ shl_scale_imm,
+ shl_mem_index_imm,
+ sal,
+ sal_mem_imm,
+ sal_scale_src,
+ sal_scale_dst,
+ sal_scale_imm,
+ sal_mem_index_imm,
+ shr,
+ shr_mem_imm,
+ shr_scale_src,
+ shr_scale_dst,
+ shr_scale_imm,
+ shr_mem_index_imm,
+ sar,
+ sar_mem_imm,
+ sar_scale_src,
+ sar_scale_dst,
+ sar_scale_imm,
+ sar_mem_index_imm,
+
/// ops flags: form:
/// 0bX0 reg1
/// 0bX1 [reg1 + imm32]
diff --git a/src/codegen.zig b/src/codegen.zig
index bcd36358b1..059d2adc14 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -372,7 +372,6 @@ pub fn generateSymbol(
return Result{ .appended = {} };
},
.Struct => {
- // TODO debug info
const struct_obj = typed_value.ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
return Result{
From 8a94971980001d29d7c8cdfe6ca25aa834552405 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 7 Feb 2022 12:19:43 -0700
Subject: [PATCH 0074/2031] std: fix i386-openbsd failing to build from source
closes #9705
---
lib/std/c/openbsd.zig | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index d2a36fc5df..6ba11e8e5a 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -982,7 +982,7 @@ comptime {
std.debug.assert(@sizeOf(siginfo_t) == 136);
}
-const arch_bits = switch (builtin.cpu.arch) {
+pub usingnamespace switch (builtin.cpu.arch) {
.x86_64 => struct {
pub const ucontext_t = extern struct {
sc_rdi: c_long,
@@ -1012,7 +1012,7 @@ const arch_bits = switch (builtin.cpu.arch) {
sc_rsp: c_long,
sc_ss: c_long,
- sc_fpstate: arch_bits.fxsave64,
+ sc_fpstate: fxsave64,
__sc_unused: c_int,
sc_mask: c_int,
sc_cookie: c_long,
@@ -1035,8 +1035,6 @@ const arch_bits = switch (builtin.cpu.arch) {
},
else => struct {},
};
-pub const ucontext_t = arch_bits.ucontext_t;
-pub const fxsave64 = arch_bits.fxsave64;
pub const sigset_t = c_uint;
pub const empty_sigset: sigset_t = 0;
From 785bccd4cea63b3f2266e7f8928000e2d5110b3b Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 7 Feb 2022 20:48:31 +0100
Subject: [PATCH 0075/2031] stage2: pass more struct tests
---
test/behavior/struct.zig | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 5cf3776889..e4b64a39d3 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -9,7 +9,7 @@ const maxInt = std.math.maxInt;
top_level_field: i32,
test "top level fields" {
- if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var instance = @This(){
.top_level_field = 1234,
@@ -176,7 +176,7 @@ const MemberFnTestFoo = struct {
};
test "call member function directly" {
- if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const result = MemberFnTestFoo.member(instance);
@@ -184,7 +184,7 @@ test "call member function directly" {
}
test "store member function in variable" {
- if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const memberFn = MemberFnTestFoo.member;
@@ -206,7 +206,7 @@ const MemberFnRand = struct {
};
test "return struct byval from function" {
- if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const bar = makeBar2(1234, 5678);
try expect(bar.y == 5678);
From 2a415a033cce07b579a490eca2556e7d700c04b1 Mon Sep 17 00:00:00 2001
From: Arnavion
Date: Wed, 22 Dec 2021 16:48:46 -0800
Subject: [PATCH 0076/2031] std.bit_set: add setRangeValue(Range, bool)
For large ranges, this is faster than having the caller call setValue() for
each index in the range. Masks wholly covered by the range can be set to
the new mask value in one go, and the two masks at either end that are
partially covered can each set the covered range of bits in one go.
---
lib/std/bit_set.zig | 174 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 174 insertions(+)
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
index 5101f934bc..d839512c07 100644
--- a/lib/std/bit_set.zig
+++ b/lib/std/bit_set.zig
@@ -110,6 +110,31 @@ pub fn IntegerBitSet(comptime size: u16) type {
self.mask |= maskBit(index);
}
+ /// Changes the value of all bits in the specified range to
+ /// match the passed boolean.
+ pub fn setRangeValue(self: *Self, range: Range, value: bool) void {
+ assert(range.end <= bit_length);
+ assert(range.start <= range.end);
+ if (range.start == range.end) return;
+ if (MaskInt == u0) return;
+
+ const start_bit = @intCast(ShiftInt, range.start);
+
+ var mask = std.math.boolMask(MaskInt, true) << start_bit;
+ if (range.end != bit_length) {
+ const end_bit = @intCast(ShiftInt, range.end);
+ mask &= std.math.boolMask(MaskInt, true) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit));
+ }
+ self.mask &= ~mask;
+
+ mask = std.math.boolMask(MaskInt, value) << start_bit;
+ if (range.end != bit_length) {
+ const end_bit = @intCast(ShiftInt, range.end);
+ mask &= std.math.boolMask(MaskInt, value) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit));
+ }
+ self.mask |= mask;
+ }
+
/// Removes a specific bit from the bit set
pub fn unset(self: *Self, index: usize) void {
assert(index < bit_length);
@@ -345,6 +370,51 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
self.masks[maskIndex(index)] |= maskBit(index);
}
+ /// Changes the value of all bits in the specified range to
+ /// match the passed boolean.
+ pub fn setRangeValue(self: *Self, range: Range, value: bool) void {
+ assert(range.end <= bit_length);
+ assert(range.start <= range.end);
+ if (range.start == range.end) return;
+ if (num_masks == 0) return;
+
+ const start_mask_index = maskIndex(range.start);
+ const start_bit = @truncate(ShiftInt, range.start);
+
+ const end_mask_index = maskIndex(range.end);
+ const end_bit = @truncate(ShiftInt, range.end);
+
+ if (start_mask_index == end_mask_index) {
+ var mask1 = std.math.boolMask(MaskInt, true) << start_bit;
+ var mask2 = std.math.boolMask(MaskInt, true) >> (mask_len - 1) - (end_bit - 1);
+ self.masks[start_mask_index] &= ~(mask1 & mask2);
+
+ mask1 = std.math.boolMask(MaskInt, value) << start_bit;
+ mask2 = std.math.boolMask(MaskInt, value) >> (mask_len - 1) - (end_bit - 1);
+ self.masks[start_mask_index] |= mask1 & mask2;
+ } else {
+ var bulk_mask_index: usize = undefined;
+ if (start_bit > 0) {
+ self.masks[start_mask_index] =
+ (self.masks[start_mask_index] & ~(std.math.boolMask(MaskInt, true) << start_bit)) |
+ (std.math.boolMask(MaskInt, value) << start_bit);
+ bulk_mask_index = start_mask_index + 1;
+ } else {
+ bulk_mask_index = start_mask_index;
+ }
+
+ while (bulk_mask_index < end_mask_index) : (bulk_mask_index += 1) {
+ self.masks[bulk_mask_index] = std.math.boolMask(MaskInt, value);
+ }
+
+ if (end_bit > 0) {
+ self.masks[end_mask_index] =
+ (self.masks[end_mask_index] & (std.math.boolMask(MaskInt, true) << end_bit)) |
+ (std.math.boolMask(MaskInt, value) >> ((@bitSizeOf(MaskInt) - 1) - (end_bit - 1)));
+ }
+ }
+ }
+
/// Removes a specific bit from the bit set
pub fn unset(self: *Self, index: usize) void {
assert(index < bit_length);
@@ -608,6 +678,50 @@ pub const DynamicBitSetUnmanaged = struct {
self.masks[maskIndex(index)] |= maskBit(index);
}
+ /// Changes the value of all bits in the specified range to
+ /// match the passed boolean.
+ pub fn setRangeValue(self: *Self, range: Range, value: bool) void {
+ assert(range.end <= self.bit_length);
+ assert(range.start <= range.end);
+ if (range.start == range.end) return;
+
+ const start_mask_index = maskIndex(range.start);
+ const start_bit = @truncate(ShiftInt, range.start);
+
+ const end_mask_index = maskIndex(range.end);
+ const end_bit = @truncate(ShiftInt, range.end);
+
+ if (start_mask_index == end_mask_index) {
+ var mask1 = std.math.boolMask(MaskInt, true) << start_bit;
+ var mask2 = std.math.boolMask(MaskInt, true) >> (@bitSizeOf(MaskInt) - 1) - (end_bit - 1);
+ self.masks[start_mask_index] &= ~(mask1 & mask2);
+
+ mask1 = std.math.boolMask(MaskInt, value) << start_bit;
+ mask2 = std.math.boolMask(MaskInt, value) >> (@bitSizeOf(MaskInt) - 1) - (end_bit - 1);
+ self.masks[start_mask_index] |= mask1 & mask2;
+ } else {
+ var bulk_mask_index: usize = undefined;
+ if (start_bit > 0) {
+ self.masks[start_mask_index] =
+ (self.masks[start_mask_index] & ~(std.math.boolMask(MaskInt, true) << start_bit)) |
+ (std.math.boolMask(MaskInt, value) << start_bit);
+ bulk_mask_index = start_mask_index + 1;
+ } else {
+ bulk_mask_index = start_mask_index;
+ }
+
+ while (bulk_mask_index < end_mask_index) : (bulk_mask_index += 1) {
+ self.masks[bulk_mask_index] = std.math.boolMask(MaskInt, value);
+ }
+
+ if (end_bit > 0) {
+ self.masks[end_mask_index] =
+ (self.masks[end_mask_index] & (std.math.boolMask(MaskInt, true) << end_bit)) |
+ (std.math.boolMask(MaskInt, value) >> ((@bitSizeOf(MaskInt) - 1) - (end_bit - 1)));
+ }
+ }
+ }
+
/// Removes a specific bit from the bit set
pub fn unset(self: *Self, index: usize) void {
assert(index < self.bit_length);
@@ -811,6 +925,12 @@ pub const DynamicBitSet = struct {
self.unmanaged.set(index);
}
+ /// Changes the value of all bits in the specified range to
+ /// match the passed boolean.
+ pub fn setRangeValue(self: *Self, range: Range, value: bool) void {
+ self.unmanaged.setRangeValue(range, value);
+ }
+
/// Removes a specific bit from the bit set
pub fn unset(self: *Self, index: usize) void {
self.unmanaged.unset(index);
@@ -990,6 +1110,14 @@ fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) typ
};
}
+/// A range of indices within a bitset.
+pub const Range = struct {
+ /// The index of the first bit of interest.
+ start: usize,
+ /// The index immediately after the last bit of interest.
+ end: usize,
+};
+
// ---------------- Tests -----------------
const testing = std.testing;
@@ -1144,6 +1272,52 @@ fn testBitSet(a: anytype, b: anytype, len: usize) !void {
try testing.expectEqual(@as(?usize, null), a.findFirstSet());
try testing.expectEqual(@as(?usize, null), a.toggleFirstSet());
try testing.expectEqual(@as(usize, 0), a.count());
+
+ a.setRangeValue(.{ .start = 0, .end = len }, false);
+ try testing.expectEqual(@as(usize, 0), a.count());
+
+ a.setRangeValue(.{ .start = 0, .end = len }, true);
+ try testing.expectEqual(len, a.count());
+
+ a.setRangeValue(.{ .start = 0, .end = len }, false);
+ a.setRangeValue(.{ .start = 0, .end = 0 }, true);
+ try testing.expectEqual(@as(usize, 0), a.count());
+
+ a.setRangeValue(.{ .start = len, .end = len }, true);
+ try testing.expectEqual(@as(usize, 0), a.count());
+
+ if (len >= 1) {
+ a.setRangeValue(.{ .start = 0, .end = len }, false);
+ a.setRangeValue(.{ .start = 0, .end = 1 }, true);
+ try testing.expectEqual(@as(usize, 1), a.count());
+ try testing.expect(a.isSet(0));
+
+ a.setRangeValue(.{ .start = 0, .end = len }, false);
+ a.setRangeValue(.{ .start = 0, .end = len - 1 }, true);
+ try testing.expectEqual(len - 1, a.count());
+ try testing.expect(!a.isSet(len - 1));
+
+ a.setRangeValue(.{ .start = 0, .end = len }, false);
+ a.setRangeValue(.{ .start = 1, .end = len }, true);
+ try testing.expectEqual(@as(usize, len - 1), a.count());
+ try testing.expect(!a.isSet(0));
+
+ a.setRangeValue(.{ .start = 0, .end = len }, false);
+ a.setRangeValue(.{ .start = len - 1, .end = len }, true);
+ try testing.expectEqual(@as(usize, 1), a.count());
+ try testing.expect(a.isSet(len - 1));
+
+ if (len >= 4) {
+ a.setRangeValue(.{ .start = 0, .end = len }, false);
+ a.setRangeValue(.{ .start = 1, .end = len - 2 }, true);
+ try testing.expectEqual(@as(usize, len - 3), a.count());
+ try testing.expect(!a.isSet(0));
+ try testing.expect(a.isSet(1));
+ try testing.expect(a.isSet(len - 3));
+ try testing.expect(!a.isSet(len - 2));
+ try testing.expect(!a.isSet(len - 1));
+ }
+ }
}
fn testStaticBitSet(comptime Set: type) !void {
From 05cf69209e44c59f838f94ab355485d2d3a0432a Mon Sep 17 00:00:00 2001
From: John Schmidt
Date: Mon, 7 Feb 2022 18:20:33 +0100
Subject: [PATCH 0077/2031] debug: implement segfault handler for macOS aarch64
Tested on an M1 MacBook Pro, macOS Monterey 12.2.
---
lib/std/c/darwin.zig | 65 +++++++++++-------------------------
lib/std/c/darwin/aarch64.zig | 18 ++++++++++
lib/std/c/darwin/x86_64.zig | 30 +++++++++++++++++
lib/std/debug.zig | 20 ++++++++---
4 files changed, 83 insertions(+), 50 deletions(-)
create mode 100644 lib/std/c/darwin/aarch64.zig
create mode 100644 lib/std/c/darwin/x86_64.zig
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index b10b582dc2..f4ca9cd6dd 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -6,6 +6,26 @@ const native_arch = builtin.target.cpu.arch;
const maxInt = std.math.maxInt;
const iovec_const = std.os.iovec_const;
+const arch_bits = switch (native_arch) {
+ .aarch64 => @import("darwin/aarch64.zig"),
+ .x86_64 => @import("darwin/x86_64.zig"),
+ else => struct {},
+};
+
+pub const ucontext_t = extern struct {
+ onstack: c_int,
+ sigmask: sigset_t,
+ stack: stack_t,
+ link: ?*ucontext_t,
+ mcsize: u64,
+ mcontext: *mcontext_t,
+};
+
+pub const mcontext_t = extern struct {
+ es: arch_bits.exception_state,
+ ss: arch_bits.thread_state,
+};
+
extern "c" fn __error() *c_int;
pub extern "c" fn NSVersionOfRunTimeLibrary(library_name: [*:0]const u8) u32;
pub extern "c" fn _NSGetExecutablePath(buf: [*:0]u8, bufsize: *u32) c_int;
@@ -478,51 +498,6 @@ pub const SIG = struct {
pub const USR2 = 31;
};
-pub const ucontext_t = extern struct {
- onstack: c_int,
- sigmask: sigset_t,
- stack: stack_t,
- link: ?*ucontext_t,
- mcsize: u64,
- mcontext: *mcontext_t,
-};
-
-pub const exception_state = extern struct {
- trapno: u16,
- cpu: u16,
- err: u32,
- faultvaddr: u64,
-};
-
-pub const thread_state = extern struct {
- rax: u64,
- rbx: u64,
- rcx: u64,
- rdx: u64,
- rdi: u64,
- rsi: u64,
- rbp: u64,
- rsp: u64,
- r8: u64,
- r9: u64,
- r10: u64,
- r11: u64,
- r12: u64,
- r13: u64,
- r14: u64,
- r15: u64,
- rip: u64,
- rflags: u64,
- cs: u64,
- fs: u64,
- gs: u64,
-};
-
-pub const mcontext_t = extern struct {
- es: exception_state,
- ss: thread_state,
-};
-
pub const siginfo_t = extern struct {
signo: c_int,
errno: c_int,
diff --git a/lib/std/c/darwin/aarch64.zig b/lib/std/c/darwin/aarch64.zig
new file mode 100644
index 0000000000..70153b5dfb
--- /dev/null
+++ b/lib/std/c/darwin/aarch64.zig
@@ -0,0 +1,18 @@
+// See C headers in
+// lib/libc/include/aarch64-macos.12-gnu/mach/arm/_structs.h
+
+pub const exception_state = extern struct {
+ far: u64, // Virtual Fault Address
+ esr: u32, // Exception syndrome
+ exception: u32, // Number of arm exception taken
+};
+
+pub const thread_state = extern struct {
+ regs: [29]u64, // General purpose registers
+ fp: u64, // Frame pointer x29
+ lr: u64, // Link register x30
+ sp: u64, // Stack pointer x31
+ pc: u64, // Program counter
+ cpsr: u32, // Current program status register
+ __pad: u32,
+};
diff --git a/lib/std/c/darwin/x86_64.zig b/lib/std/c/darwin/x86_64.zig
new file mode 100644
index 0000000000..a7f2c509c7
--- /dev/null
+++ b/lib/std/c/darwin/x86_64.zig
@@ -0,0 +1,30 @@
+pub const exception_state = extern struct {
+ trapno: u16,
+ cpu: u16,
+ err: u32,
+ faultvaddr: u64,
+};
+
+pub const thread_state = extern struct {
+ rax: u64,
+ rbx: u64,
+ rcx: u64,
+ rdx: u64,
+ rdi: u64,
+ rsi: u64,
+ rbp: u64,
+ rsp: u64,
+ r8: u64,
+ r9: u64,
+ r10: u64,
+ r11: u64,
+ r12: u64,
+ r13: u64,
+ r14: u64,
+ r15: u64,
+ rip: u64,
+ rflags: u64,
+ cs: u64,
+ fs: u64,
+ gs: u64,
+};
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index a7bfda1cd2..69a68faad6 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1610,9 +1610,13 @@ fn getDebugInfoAllocator() mem.Allocator {
/// Whether or not the current target can print useful debug information when a segfault occurs.
pub const have_segfault_handling_support = switch (native_os) {
- .linux, .netbsd, .solaris => true,
- .macos => native_arch == .x86_64,
- .windows => true,
+ .linux,
+ .macos,
+ .netbsd,
+ .solaris,
+ .windows,
+ => true,
+
.freebsd, .openbsd => @hasDecl(os.system, "ucontext_t"),
else => false,
};
@@ -1726,9 +1730,15 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
},
.aarch64 => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
- const ip = @intCast(usize, ctx.mcontext.pc);
+ const ip = switch (native_os) {
+ .macos => @intCast(usize, ctx.mcontext.ss.pc),
+ else => @intCast(usize, ctx.mcontext.pc),
+ };
// x29 is the ABI-designated frame pointer
- const bp = @intCast(usize, ctx.mcontext.regs[29]);
+ const bp = switch (native_os) {
+ .macos => @intCast(usize, ctx.mcontext.ss.fp),
+ else => @intCast(usize, ctx.mcontext.regs[29]),
+ };
dumpStackTraceFromBase(bp, ip);
},
else => {},
From 722d4a11bbba4052558f6f69b7e710d1206f3355 Mon Sep 17 00:00:00 2001
From: John Schmidt
Date: Fri, 4 Feb 2022 20:21:15 +0100
Subject: [PATCH 0078/2031] stage2: implement @sqrt for f{16,32,64}
Support for f128, comptime_float, and c_longdouble requires improvements
to compiler_rt and will be implemented in a later PR. Some of the code in
this commit could be made more generic, for instance `llvm.airSqrt`
could probably be `llvm.airUnaryMath`, but let's cross that
bridge when we get to it.
---
src/Air.zig | 6 +++
src/Liveness.zig | 1 +
src/Sema.zig | 84 ++++++++++++++++++++++++++------
src/arch/aarch64/CodeGen.zig | 11 +++++
src/arch/arm/CodeGen.zig | 11 +++++
src/arch/riscv64/CodeGen.zig | 11 +++++
src/arch/wasm/CodeGen.zig | 2 +
src/arch/x86_64/CodeGen.zig | 11 +++++
src/codegen/c.zig | 8 +++
src/codegen/llvm.zig | 16 ++++++
src/print_air.zig | 1 +
src/value.zig | 22 +++++++++
test/behavior/floatop.zig | 42 ++++++++++++++++
test/behavior/floatop_stage1.zig | 34 -------------
test/behavior/math.zig | 12 ++---
15 files changed, 217 insertions(+), 55 deletions(-)
diff --git a/src/Air.zig b/src/Air.zig
index 14f8f96d38..6888f51963 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -237,6 +237,10 @@ pub const Inst = struct {
/// Uses the `ty_op` field.
popcount,
+ /// Computes the square root of a floating point number.
+ /// Uses the `un_op` field.
+ sqrt,
+
/// `<`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_lt,
@@ -749,6 +753,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.max,
=> return air.typeOf(datas[inst].bin_op.lhs),
+ .sqrt => return air.typeOf(datas[inst].un_op),
+
.cmp_lt,
.cmp_lte,
.cmp_eq,
diff --git a/src/Liveness.zig b/src/Liveness.zig
index f07e438246..bed7de1507 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -338,6 +338,7 @@ fn analyzeInst(
.ret_load,
.tag_name,
.error_name,
+ .sqrt,
=> {
const operand = inst_datas[inst].un_op;
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
diff --git a/src/Sema.zig b/src/Sema.zig
index 1dba136a48..b1772502bf 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -745,19 +745,19 @@ fn analyzeBodyInner(
.clz => try sema.zirClzCtz(block, inst, .clz, Value.clz),
.ctz => try sema.zirClzCtz(block, inst, .ctz, Value.ctz),
- .sqrt => try sema.zirUnaryMath(block, inst),
- .sin => try sema.zirUnaryMath(block, inst),
- .cos => try sema.zirUnaryMath(block, inst),
- .exp => try sema.zirUnaryMath(block, inst),
- .exp2 => try sema.zirUnaryMath(block, inst),
- .log => try sema.zirUnaryMath(block, inst),
- .log2 => try sema.zirUnaryMath(block, inst),
- .log10 => try sema.zirUnaryMath(block, inst),
- .fabs => try sema.zirUnaryMath(block, inst),
- .floor => try sema.zirUnaryMath(block, inst),
- .ceil => try sema.zirUnaryMath(block, inst),
- .trunc => try sema.zirUnaryMath(block, inst),
- .round => try sema.zirUnaryMath(block, inst),
+ .sqrt => try sema.zirUnaryMath(block, inst, .sqrt),
+ .sin => try sema.zirUnaryMath(block, inst, .sin),
+ .cos => try sema.zirUnaryMath(block, inst, .cos),
+ .exp => try sema.zirUnaryMath(block, inst, .exp),
+ .exp2 => try sema.zirUnaryMath(block, inst, .exp2),
+ .log => try sema.zirUnaryMath(block, inst, .log),
+ .log2 => try sema.zirUnaryMath(block, inst, .log2),
+ .log10 => try sema.zirUnaryMath(block, inst, .log10),
+ .fabs => try sema.zirUnaryMath(block, inst, .fabs),
+ .floor => try sema.zirUnaryMath(block, inst, .floor),
+ .ceil => try sema.zirUnaryMath(block, inst, .ceil),
+ .trunc => try sema.zirUnaryMath(block, inst, .trunc),
+ .round => try sema.zirUnaryMath(block, inst, .round),
.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
@@ -11010,10 +11010,64 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
return block.addUnOp(.error_name, operand);
}
-fn zirUnaryMath(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirUnaryMath(
+ sema: *Sema,
+ block: *Block,
+ inst: Zir.Inst.Index,
+ zir_tag: Zir.Inst.Tag,
+) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirUnaryMath", .{});
+ const operand = sema.resolveInst(inst_data.operand);
+ const operand_ty = sema.typeOf(operand);
+ const operand_zig_ty_tag = operand_ty.zigTypeTag();
+
+ const is_float = operand_zig_ty_tag == .Float or operand_zig_ty_tag == .ComptimeFloat;
+ if (!is_float) {
+ return sema.fail(block, src, "expected float type, found '{s}'", .{@tagName(operand_zig_ty_tag)});
+ }
+
+ switch (zir_tag) {
+ .sqrt => {
+ switch (operand_ty.tag()) {
+ .f128,
+ .comptime_float,
+ .c_longdouble,
+ => |t| return sema.fail(block, src, "TODO implement @sqrt for type '{s}'", .{@tagName(t)}),
+ else => {},
+ }
+
+ const maybe_operand_val = try sema.resolveMaybeUndefVal(block, src, operand);
+ if (maybe_operand_val) |val| {
+ if (val.isUndef())
+ return sema.addConstUndef(operand_ty);
+ const result_val = try val.sqrt(operand_ty, sema.arena);
+ return sema.addConstant(operand_ty, result_val);
+ }
+
+ try sema.requireRuntimeBlock(block, src);
+ return block.addUnOp(.sqrt, operand);
+ },
+
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .trunc,
+ .round,
+ => return sema.fail(block, src, "TODO: implement zirUnaryMath for ZIR tag '{s}'", .{@tagName(zir_tag)}),
+
+ else => unreachable,
+ }
}
fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 6e8f88a2a7..d0413af02f 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -528,6 +528,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
+ .sqrt => try self.airUnaryMath(inst),
+
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
@@ -1223,6 +1225,15 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 1859ce874f..fb473ef412 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -520,6 +520,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
+ .sqrt => try self.airUnaryMath(inst),
+
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
@@ -1377,6 +1379,15 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
// return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 612ff78bd6..ce5dc39bf8 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -507,6 +507,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
+ .sqrt => try self.airUnaryMath(inst),
+
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
@@ -1166,6 +1168,15 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 8e0ffac76b..b1e7a0d3a1 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1681,6 +1681,8 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
+ .sqrt,
+
.ptr_slice_len_ptr,
.ptr_slice_ptr_ptr,
.int_to_float,
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index a60b8c78f0..10a6120556 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -599,6 +599,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
+ .sqrt => try self.airUnaryMath(inst),
+
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
@@ -1578,6 +1580,15 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 44b904f031..fc3848df21 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1446,6 +1446,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.mul_sat => try airSatOp(f, inst, "muls_"),
.shl_sat => try airSatOp(f, inst, "shls_"),
+ .sqrt => try airSqrt(f, inst),
+
.add_with_overflow => try airAddWithOverflow(f, inst),
.sub_with_overflow => try airSubWithOverflow(f, inst),
.mul_with_overflow => try airMulWithOverflow(f, inst),
@@ -3393,6 +3395,12 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue {
return CValue.none;
}
+fn airSqrt(f: *Function, inst: Air.Inst.Index) !CValue {
+ _ = f;
+ _ = inst;
+ return f.fail("TODO: C backend: implement sqrt", .{});
+}
+
fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
return switch (order) {
.Unordered => "memory_order_relaxed",
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index abf371c85b..5a2c21eece 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2050,6 +2050,8 @@ pub const FuncGen = struct {
.shr => try self.airShr(inst, false),
.shr_exact => try self.airShr(inst, true),
+ .sqrt => try self.airSqrt(inst),
+
.cmp_eq => try self.airCmp(inst, .eq),
.cmp_gt => try self.airCmp(inst, .gt),
.cmp_gte => try self.airCmp(inst, .gte),
@@ -4211,6 +4213,20 @@ pub const FuncGen = struct {
}
}
+ fn airSqrt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ const operand_ty = self.air.typeOf(un_op);
+
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const fn_val = self.getIntrinsic("llvm.sqrt", &.{operand_llvm_ty});
+ const params = [_]*const llvm.Value{operand};
+
+ return self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, "");
+ }
+
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, prefix: [*:0]const u8) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
diff --git a/src/print_air.zig b/src/print_air.zig
index 6e1ed3f3d7..341e736b91 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -158,6 +158,7 @@ const Writer = struct {
.ret_load,
.tag_name,
.error_name,
+ .sqrt,
=> try w.writeUnOp(s, inst),
.breakpoint,
diff --git a/src/value.zig b/src/value.zig
index cc6827b0cc..4bb0a58aed 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -3265,6 +3265,28 @@ pub const Value = extern union {
}
}
+ pub fn sqrt(val: Value, float_type: Type, arena: Allocator) !Value {
+ switch (float_type.tag()) {
+ .f16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @sqrt(f));
+ },
+ .f32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @sqrt(f));
+ },
+ .f64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @sqrt(f));
+ },
+
+ // TODO: implement @sqrt for these types
+ .f128, .comptime_float, .c_longdouble => unreachable,
+
+ else => unreachable,
+ }
+ }
+
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index 20ef4ce68d..7807c690f6 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -72,3 +72,45 @@ test "negative f128 floatToInt at compile-time" {
var b = @floatToInt(i64, a);
try expect(@as(i64, -2) == b);
}
+
+test "@sqrt" {
+ comptime try testSqrt();
+ try testSqrt();
+}
+
+fn testSqrt() !void {
+ {
+ var a: f16 = 4;
+ try expect(@sqrt(a) == 2);
+ }
+ {
+ var a: f32 = 9;
+ try expect(@sqrt(a) == 3);
+ var b: f32 = 1.1;
+ try expect(math.approxEqAbs(f32, @sqrt(b), 1.0488088481701516, epsilon));
+ }
+ {
+ var a: f64 = 25;
+ try expect(@sqrt(a) == 5);
+ }
+}
+
+test "more @sqrt f16 tests" {
+ // TODO these are not all passing at comptime
+ try expect(@sqrt(@as(f16, 0.0)) == 0.0);
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 2.0)), 1.414214, epsilon));
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 3.6)), 1.897367, epsilon));
+ try expect(@sqrt(@as(f16, 4.0)) == 2.0);
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 7.539840)), 2.745877, epsilon));
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 19.230934)), 4.385309, epsilon));
+ try expect(@sqrt(@as(f16, 64.0)) == 8.0);
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 64.1)), 8.006248, epsilon));
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 8942.230469)), 94.563370, epsilon));
+
+ // special cases
+ try expect(math.isPositiveInf(@sqrt(@as(f16, math.inf(f16)))));
+ try expect(@sqrt(@as(f16, 0.0)) == 0.0);
+ try expect(@sqrt(@as(f16, -0.0)) == -0.0);
+ try expect(math.isNan(@sqrt(@as(f16, -1.0))));
+ try expect(math.isNan(@sqrt(@as(f16, math.nan(f16)))));
+}
diff --git a/test/behavior/floatop_stage1.zig b/test/behavior/floatop_stage1.zig
index 303288a118..cd11f41b40 100644
--- a/test/behavior/floatop_stage1.zig
+++ b/test/behavior/floatop_stage1.zig
@@ -14,20 +14,6 @@ test "@sqrt" {
}
fn testSqrt() !void {
- {
- var a: f16 = 4;
- try expect(@sqrt(a) == 2);
- }
- {
- var a: f32 = 9;
- try expect(@sqrt(a) == 3);
- var b: f32 = 1.1;
- try expect(math.approxEqAbs(f32, @sqrt(b), 1.0488088481701516, epsilon));
- }
- {
- var a: f64 = 25;
- try expect(@sqrt(a) == 5);
- }
if (has_f80_rt) {
var a: f80 = 25;
try expect(@sqrt(a) == 5);
@@ -51,26 +37,6 @@ fn testSqrt() !void {
}
}
-test "more @sqrt f16 tests" {
- // TODO these are not all passing at comptime
- try expect(@sqrt(@as(f16, 0.0)) == 0.0);
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 2.0)), 1.414214, epsilon));
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 3.6)), 1.897367, epsilon));
- try expect(@sqrt(@as(f16, 4.0)) == 2.0);
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 7.539840)), 2.745877, epsilon));
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 19.230934)), 4.385309, epsilon));
- try expect(@sqrt(@as(f16, 64.0)) == 8.0);
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 64.1)), 8.006248, epsilon));
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 8942.230469)), 94.563370, epsilon));
-
- // special cases
- try expect(math.isPositiveInf(@sqrt(@as(f16, math.inf(f16)))));
- try expect(@sqrt(@as(f16, 0.0)) == 0.0);
- try expect(@sqrt(@as(f16, -0.0)) == -0.0);
- try expect(math.isNan(@sqrt(@as(f16, -1.0))));
- try expect(math.isNan(@sqrt(@as(f16, math.nan(f16)))));
-}
-
test "@sin" {
comptime try testSin();
try testSin();
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index a1243eb7c1..c23e8ebe3e 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -792,8 +792,6 @@ fn remdiv(comptime T: type) !void {
}
test "@sqrt" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
try testSqrt(f64, 12.0);
comptime try testSqrt(f64, 12.0);
try testSqrt(f32, 13.0);
@@ -801,10 +799,12 @@ test "@sqrt" {
try testSqrt(f16, 13.0);
comptime try testSqrt(f16, 13.0);
- const x = 14.0;
- const y = x * x;
- const z = @sqrt(y);
- comptime try expect(z == x);
+ if (builtin.zig_backend == .stage1) {
+ const x = 14.0;
+ const y = x * x;
+ const z = @sqrt(y);
+ comptime try expect(z == x);
+ }
}
fn testSqrt(comptime T: type, x: T) !void {
From a028488384c599aa997ba04bbd5ed98f2172630c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 7 Feb 2022 16:48:37 -0700
Subject: [PATCH 0079/2031] Sema: clean up zirUnaryMath
* pass air_tag instead of zir_tag
* also pass eval function so that the branch only happens once and the
body of zirUnaryMath is simplified
* Value.sqrt: update to handle f80 and f128 in the normalized way that
includes handling c_longdouble.
Semi-related change: fix incorrect sqrt builtin name for f80 in stage1.
---
src/Sema.zig | 81 +++++++++++++-----------------------------
src/stage1/codegen.cpp | 2 +-
src/value.zig | 28 ++++++++++-----
3 files changed, 44 insertions(+), 67 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index b1772502bf..72dfb4420b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -745,19 +745,19 @@ fn analyzeBodyInner(
.clz => try sema.zirClzCtz(block, inst, .clz, Value.clz),
.ctz => try sema.zirClzCtz(block, inst, .ctz, Value.ctz),
- .sqrt => try sema.zirUnaryMath(block, inst, .sqrt),
- .sin => try sema.zirUnaryMath(block, inst, .sin),
- .cos => try sema.zirUnaryMath(block, inst, .cos),
- .exp => try sema.zirUnaryMath(block, inst, .exp),
- .exp2 => try sema.zirUnaryMath(block, inst, .exp2),
- .log => try sema.zirUnaryMath(block, inst, .log),
- .log2 => try sema.zirUnaryMath(block, inst, .log2),
- .log10 => try sema.zirUnaryMath(block, inst, .log10),
- .fabs => try sema.zirUnaryMath(block, inst, .fabs),
- .floor => try sema.zirUnaryMath(block, inst, .floor),
- .ceil => try sema.zirUnaryMath(block, inst, .ceil),
- .trunc => try sema.zirUnaryMath(block, inst, .trunc),
- .round => try sema.zirUnaryMath(block, inst, .round),
+ .sqrt => try sema.zirUnaryMath(block, inst, .sqrt, Value.sqrt),
+ .sin => @panic("TODO"),
+ .cos => @panic("TODO"),
+ .exp => @panic("TODO"),
+ .exp2 => @panic("TODO"),
+ .log => @panic("TODO"),
+ .log2 => @panic("TODO"),
+ .log10 => @panic("TODO"),
+ .fabs => @panic("TODO"),
+ .floor => @panic("TODO"),
+ .ceil => @panic("TODO"),
+ .trunc => @panic("TODO"),
+ .round => @panic("TODO"),
.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
@@ -11014,60 +11014,27 @@ fn zirUnaryMath(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
- zir_tag: Zir.Inst.Tag,
+ air_tag: Air.Inst.Tag,
+ eval: fn (Value, Type, Allocator, std.Target) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
const operand = sema.resolveInst(inst_data.operand);
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = sema.typeOf(operand);
- const operand_zig_ty_tag = operand_ty.zigTypeTag();
+ try sema.checkFloatType(block, operand_src, operand_ty);
- const is_float = operand_zig_ty_tag == .Float or operand_zig_ty_tag == .ComptimeFloat;
- if (!is_float) {
- return sema.fail(block, src, "expected float type, found '{s}'", .{@tagName(operand_zig_ty_tag)});
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
+ if (operand_val.isUndef()) return sema.addConstUndef(operand_ty);
+ const target = sema.mod.getTarget();
+ const result_val = try eval(operand_val, operand_ty, sema.arena, target);
+ return sema.addConstant(operand_ty, result_val);
}
- switch (zir_tag) {
- .sqrt => {
- switch (operand_ty.tag()) {
- .f128,
- .comptime_float,
- .c_longdouble,
- => |t| return sema.fail(block, src, "TODO implement @sqrt for type '{s}'", .{@tagName(t)}),
- else => {},
- }
-
- const maybe_operand_val = try sema.resolveMaybeUndefVal(block, src, operand);
- if (maybe_operand_val) |val| {
- if (val.isUndef())
- return sema.addConstUndef(operand_ty);
- const result_val = try val.sqrt(operand_ty, sema.arena);
- return sema.addConstant(operand_ty, result_val);
- }
-
- try sema.requireRuntimeBlock(block, src);
- return block.addUnOp(.sqrt, operand);
- },
-
- .sin,
- .cos,
- .exp,
- .exp2,
- .log,
- .log2,
- .log10,
- .fabs,
- .floor,
- .ceil,
- .trunc,
- .round,
- => return sema.fail(block, src, "TODO: implement zirUnaryMath for ZIR tag '{s}'", .{@tagName(zir_tag)}),
-
- else => unreachable,
- }
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addUnOp(air_tag, operand);
}
fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 02f84beeab..c06f71e834 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -6996,7 +6996,7 @@ static LLVMValueRef ir_render_soft_f80_float_op(CodeGen *g, Stage1Air *executabl
const char *func_name;
switch (instruction->fn_id) {
case BuiltinFnIdSqrt:
- func_name = "__sqrt";
+ func_name = "__sqrtx";
break;
case BuiltinFnIdSin:
func_name = "__sinx";
diff --git a/src/value.zig b/src/value.zig
index 4bb0a58aed..23a04f2e5a 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -3265,24 +3265,34 @@ pub const Value = extern union {
}
}
- pub fn sqrt(val: Value, float_type: Type, arena: Allocator) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const f = val.toFloat(f16);
return Value.Tag.float_16.create(arena, @sqrt(f));
},
- .f32 => {
+ 32 => {
const f = val.toFloat(f32);
return Value.Tag.float_32.create(arena, @sqrt(f));
},
- .f64 => {
+ 64 => {
const f = val.toFloat(f64);
return Value.Tag.float_64.create(arena, @sqrt(f));
},
-
- // TODO: implement @sqrt for these types
- .f128, .comptime_float, .c_longdouble => unreachable,
-
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt __sqrtx");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @sqrt(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt sqrtq");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @sqrt(f));
+ },
else => unreachable,
}
}
From a15d2d582b4015af0f72caa2b5f09b7e665e2c1e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 7 Feb 2022 17:11:26 -0800
Subject: [PATCH 0080/2031] stage2: fix crash_report segfault compile error
Regressed in 05cf69209e44c59f838f94ab355485d2d3a0432a.
---
src/crash_report.zig | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/crash_report.zig b/src/crash_report.zig
index 724269556b..7e72c64800 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -4,6 +4,7 @@ const debug = std.debug;
const os = std.os;
const io = std.io;
const print_zir = @import("print_zir.zig");
+const native_os = builtin.os.tag;
const Module = @import("Module.zig");
const Sema = @import("Sema.zig");
@@ -233,9 +234,15 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
},
.aarch64 => ctx: {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
- const ip = @intCast(usize, ctx.mcontext.pc);
+ const ip = switch (native_os) {
+ .macos => @intCast(usize, ctx.mcontext.ss.pc),
+ else => @intCast(usize, ctx.mcontext.pc),
+ };
// x29 is the ABI-designated frame pointer
- const bp = @intCast(usize, ctx.mcontext.regs[29]);
+ const bp = switch (native_os) {
+ .macos => @intCast(usize, ctx.mcontext.ss.fp),
+ else => @intCast(usize, ctx.mcontext.regs[29]),
+ };
break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
},
else => .not_supported,
From fc59a0406157dbd0704cf9f05cd04b6a8c87d7df Mon Sep 17 00:00:00 2001
From: Jan Philipp Hafer
Date: Mon, 7 Feb 2022 00:20:13 +0100
Subject: [PATCH 0081/2031] compiler_rt: add subo
- approach by Hacker's Delight with wrapping subtraction
- performance expected to be similar to addo
- tests with all relevant combinations of min,max with -1,0,+1 and all
combinations of sequences +-1,2,4..,max
---
lib/std/special/compiler_rt.zig | 6 ++
lib/std/special/compiler_rt/addo.zig | 2 +-
lib/std/special/compiler_rt/subo.zig | 38 +++++++++
lib/std/special/compiler_rt/subodi4_test.zig | 81 +++++++++++++++++++
lib/std/special/compiler_rt/subosi4_test.zig | 82 ++++++++++++++++++++
lib/std/special/compiler_rt/suboti4_test.zig | 81 +++++++++++++++++++
6 files changed, 289 insertions(+), 1 deletion(-)
create mode 100644 lib/std/special/compiler_rt/subo.zig
create mode 100644 lib/std/special/compiler_rt/subodi4_test.zig
create mode 100644 lib/std/special/compiler_rt/subosi4_test.zig
create mode 100644 lib/std/special/compiler_rt/suboti4_test.zig
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index 286237aa7b..36f703464a 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -112,6 +112,12 @@ comptime {
@export(__addodi4, .{ .name = "__addodi4", .linkage = linkage });
const __addoti4 = @import("compiler_rt/addo.zig").__addoti4;
@export(__addoti4, .{ .name = "__addoti4", .linkage = linkage });
+ const __subosi4 = @import("compiler_rt/subo.zig").__subosi4;
+ @export(__subosi4, .{ .name = "__subosi4", .linkage = linkage });
+ const __subodi4 = @import("compiler_rt/subo.zig").__subodi4;
+ @export(__subodi4, .{ .name = "__subodi4", .linkage = linkage });
+ const __suboti4 = @import("compiler_rt/subo.zig").__suboti4;
+ @export(__suboti4, .{ .name = "__suboti4", .linkage = linkage });
const __mulosi4 = @import("compiler_rt/mulo.zig").__mulosi4;
@export(__mulosi4, .{ .name = "__mulosi4", .linkage = linkage });
const __mulodi4 = @import("compiler_rt/mulo.zig").__mulodi4;
diff --git a/lib/std/special/compiler_rt/addo.zig b/lib/std/special/compiler_rt/addo.zig
index 966c74cb8e..91ed15747c 100644
--- a/lib/std/special/compiler_rt/addo.zig
+++ b/lib/std/special/compiler_rt/addo.zig
@@ -15,7 +15,7 @@ inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST
// and the sign of a+b+carry is the same as a (or equivalently b).
// Slower routine: res = ~(a ^ b) & ((sum ^ a)
// Faster routine: res = (sum ^ a) & (sum ^ b)
- // Oerflow occured, iff (res < 0)
+ // Overflow occurred, iff (res < 0)
if (((sum ^ a) & (sum ^ b)) < 0)
overflow.* = 1;
return sum;
diff --git a/lib/std/special/compiler_rt/subo.zig b/lib/std/special/compiler_rt/subo.zig
new file mode 100644
index 0000000000..af28c6eead
--- /dev/null
+++ b/lib/std/special/compiler_rt/subo.zig
@@ -0,0 +1,38 @@
+const builtin = @import("builtin");
+
+// subo - subtract overflow
+// * return a-%b.
+// * return if a-b overflows => 1 else => 0
+// - suboXi4_generic as default
+
+inline fn suboXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+ @setRuntimeSafety(builtin.is_test);
+ overflow.* = 0;
+ var sum: ST = a -% b;
+ // Hacker's Delight: section Overflow Detection, subsection Signed Add/Subtract
+ // Let sum = a -% b == a - b - carry == wraparound subtraction.
+ // Overflow in a-b-carry occurs, iff a and b have opposite signs
+ // and the sign of a-b-carry is opposite of a (or equivalently same as b).
+ // Faster routine: res = (a ^ b) & (sum ^ a)
+ // Slower routine: res = (sum^a) & ~(sum^b)
+ // Overflow occurred, iff (res < 0)
+ if (((a ^ b) & (sum ^ a)) < 0)
+ overflow.* = 1;
+ return sum;
+}
+
+pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+ return suboXi4_generic(i32, a, b, overflow);
+}
+pub fn __subodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+ return suboXi4_generic(i64, a, b, overflow);
+}
+pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+ return suboXi4_generic(i128, a, b, overflow);
+}
+
+test {
+ _ = @import("subosi4_test.zig");
+ _ = @import("subodi4_test.zig");
+ _ = @import("suboti4_test.zig");
+}
diff --git a/lib/std/special/compiler_rt/subodi4_test.zig b/lib/std/special/compiler_rt/subodi4_test.zig
new file mode 100644
index 0000000000..687e97c71c
--- /dev/null
+++ b/lib/std/special/compiler_rt/subodi4_test.zig
@@ -0,0 +1,81 @@
+const subo = @import("subo.zig");
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+fn test__subodi4(a: i64, b: i64) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = subo.__subodi4(a, b, &result_ov);
+ var expected: i64 = simple_subodi4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+// 2 cases on evaluating `a-b`:
+// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a < min+b
+// 2. `a-b` may overflow, iff b<0 && a>0 and a-b > max <=> a > max+b
+// `-b` evaluation may overflow, iff b==min, but this is handled by the hardware
+pub fn simple_subodi4(a: i64, b: i64, overflow: *c_int) i64 {
+ overflow.* = 0;
+ const min: i64 = math.minInt(i64);
+ const max: i64 = math.maxInt(i64);
+ if (((b > 0) and (a < min + b)) or
+ ((b < 0) and (a > max + b)))
+ overflow.* = 1;
+ return a -% b;
+}
+
+test "subodi3" {
+ const min: i64 = math.minInt(i64);
+ const max: i64 = math.maxInt(i64);
+ var i: i64 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__subodi4(i, i);
+ try test__subodi4(-i, -i);
+ try test__subodi4(i, -i);
+ try test__subodi4(-i, i);
+ }
+
+ // edge cases
+ // 0 - 0 = 0
+ // MIN - MIN = 0
+ // MAX - MAX = 0
+ // 0 - MIN overflow
+ // 0 - MAX = MIN+1
+ // MIN - 0 = MIN
+ // MAX - 0 = MAX
+ // MIN - MAX overflow
+ // MAX - MIN overflow
+ try test__subodi4(0, 0);
+ try test__subodi4(min, min);
+ try test__subodi4(max, max);
+ try test__subodi4(0, min);
+ try test__subodi4(0, max);
+ try test__subodi4(min, 0);
+ try test__subodi4(max, 0);
+ try test__subodi4(min, max);
+ try test__subodi4(max, min);
+
+ // derived edge cases
+ // MIN+1 - MIN = 1
+ // MAX-1 - MAX = -1
+ // 1 - MIN overflow
+ // -1 - MIN = MAX
+ // -1 - MAX = MIN
+ // +1 - MAX = MIN+2
+ // MIN - 1 overflow
+ // MIN - -1 = MIN+1
+ // MAX - 1 = MAX-1
+ // MAX - -1 overflow
+ try test__subodi4(min + 1, min);
+ try test__subodi4(max - 1, max);
+ try test__subodi4(1, min);
+ try test__subodi4(-1, min);
+ try test__subodi4(-1, max);
+ try test__subodi4(1, max);
+ try test__subodi4(min, 1);
+ try test__subodi4(min, -1);
+ try test__subodi4(max, -1);
+ try test__subodi4(max, 1);
+}
diff --git a/lib/std/special/compiler_rt/subosi4_test.zig b/lib/std/special/compiler_rt/subosi4_test.zig
new file mode 100644
index 0000000000..6c7ae97c25
--- /dev/null
+++ b/lib/std/special/compiler_rt/subosi4_test.zig
@@ -0,0 +1,82 @@
+const subo = @import("subo.zig");
+const testing = @import("std").testing;
+
+fn test__subosi4(a: i32, b: i32) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = subo.__subosi4(a, b, &result_ov);
+ var expected: i32 = simple_subosi4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+// 2 cases on evaluating `a-b`:
+// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a < min+b
+// 2. `a-b` may overflow, iff b<0 && a>0 and a-b > max <=> a > max+b
+// `-b` evaluation may overflow, iff b==min, but this is handled by the hardware
+pub fn simple_subosi4(a: i32, b: i32, overflow: *c_int) i32 {
+ overflow.* = 0;
+ const min: i32 = -2147483648;
+ const max: i32 = 2147483647;
+ if (((b > 0) and (a < min + b)) or
+ ((b < 0) and (a > max + b)))
+ overflow.* = 1;
+ return a -% b;
+}
+
+test "subosi3" {
+ // -2^31 <= i32 <= 2^31-1
+ // 2^31 = 2147483648
+ // 2^31-1 = 2147483647
+ const min: i32 = -2147483648;
+ const max: i32 = 2147483647;
+ var i: i32 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__subosi4(i, i);
+ try test__subosi4(-i, -i);
+ try test__subosi4(i, -i);
+ try test__subosi4(-i, i);
+ }
+
+ // edge cases
+ // 0 - 0 = 0
+ // MIN - MIN = 0
+ // MAX - MAX = 0
+ // 0 - MIN overflow
+ // 0 - MAX = MIN+1
+ // MIN - 0 = MIN
+ // MAX - 0 = MAX
+ // MIN - MAX overflow
+ // MAX - MIN overflow
+ try test__subosi4(0, 0);
+ try test__subosi4(min, min);
+ try test__subosi4(max, max);
+ try test__subosi4(0, min);
+ try test__subosi4(0, max);
+ try test__subosi4(min, 0);
+ try test__subosi4(max, 0);
+ try test__subosi4(min, max);
+ try test__subosi4(max, min);
+
+ // derived edge cases
+ // MIN+1 - MIN = 1
+ // MAX-1 - MAX = -1
+ // 1 - MIN overflow
+ // -1 - MIN = MAX
+ // -1 - MAX = MIN
+ // +1 - MAX = MIN+2
+ // MIN - 1 overflow
+ // MIN - -1 = MIN+1
+ // MAX - 1 = MAX-1
+ // MAX - -1 overflow
+ try test__subosi4(min + 1, min);
+ try test__subosi4(max - 1, max);
+ try test__subosi4(1, min);
+ try test__subosi4(-1, min);
+ try test__subosi4(-1, max);
+ try test__subosi4(1, max);
+ try test__subosi4(min, 1);
+ try test__subosi4(min, -1);
+ try test__subosi4(max, -1);
+ try test__subosi4(max, 1);
+}
diff --git a/lib/std/special/compiler_rt/suboti4_test.zig b/lib/std/special/compiler_rt/suboti4_test.zig
new file mode 100644
index 0000000000..f42fe3edce
--- /dev/null
+++ b/lib/std/special/compiler_rt/suboti4_test.zig
@@ -0,0 +1,81 @@
+const subo = @import("subo.zig");
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+fn test__suboti4(a: i128, b: i128) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = subo.__suboti4(a, b, &result_ov);
+ var expected: i128 = simple_suboti4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+// 2 cases on evaluating `a-b`:
+// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a < min+b
+// 2. `a-b` may overflow, iff b<0 && a>0 and a-b > max <=> a > max+b
+// `-b` evaluation may overflow, iff b==min, but this is handled by the hardware
+pub fn simple_suboti4(a: i128, b: i128, overflow: *c_int) i128 {
+ overflow.* = 0;
+ const min: i128 = math.minInt(i128);
+ const max: i128 = math.maxInt(i128);
+ if (((b > 0) and (a < min + b)) or
+ ((b < 0) and (a > max + b)))
+ overflow.* = 1;
+ return a -% b;
+}
+
+test "suboti3" {
+ const min: i128 = math.minInt(i128);
+ const max: i128 = math.maxInt(i128);
+ var i: i128 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__suboti4(i, i);
+ try test__suboti4(-i, -i);
+ try test__suboti4(i, -i);
+ try test__suboti4(-i, i);
+ }
+
+ // edge cases
+ // 0 - 0 = 0
+ // MIN - MIN = 0
+ // MAX - MAX = 0
+ // 0 - MIN overflow
+ // 0 - MAX = MIN+1
+ // MIN - 0 = MIN
+ // MAX - 0 = MAX
+ // MIN - MAX overflow
+ // MAX - MIN overflow
+ try test__suboti4(0, 0);
+ try test__suboti4(min, min);
+ try test__suboti4(max, max);
+ try test__suboti4(0, min);
+ try test__suboti4(0, max);
+ try test__suboti4(min, 0);
+ try test__suboti4(max, 0);
+ try test__suboti4(min, max);
+ try test__suboti4(max, min);
+
+ // derived edge cases
+ // MIN+1 - MIN = 1
+ // MAX-1 - MAX = -1
+ // 1 - MIN overflow
+ // -1 - MIN = MAX
+ // -1 - MAX = MIN
+ // +1 - MAX = MIN+2
+ // MIN - 1 overflow
+ // MIN - -1 = MIN+1
+ // MAX - 1 = MAX-1
+ // MAX - -1 overflow
+ try test__suboti4(min + 1, min);
+ try test__suboti4(max - 1, max);
+ try test__suboti4(1, min);
+ try test__suboti4(-1, min);
+ try test__suboti4(-1, max);
+ try test__suboti4(1, max);
+ try test__suboti4(min, 1);
+ try test__suboti4(min, -1);
+ try test__suboti4(max, -1);
+ try test__suboti4(max, 1);
+}
From f50203c83667ed3ad0c57fdc953322a5f9c221ac Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Sun, 6 Feb 2022 12:49:42 +0100
Subject: [PATCH 0082/2031] wasm: update test runner
This updates the test runner for stage2 to emit to stdout with the passed, skipped and failed tests
similar to the LLVM backend.
Another change to this is the start function, as it's now more in line with stage1's.
The stage2 test infrastructure for wasm/wasi has been updated to reflect this as well.
---
lib/std/special/test_runner.zig | 2 +-
lib/std/start.zig | 8 +-
src/arch/wasm/CodeGen.zig | 2 +-
src/link/Wasm.zig | 16 ++-
src/link/Wasm/Symbol.zig | 5 +-
test/stage2/wasm.zig | 225 ++++++++++++++++----------------
6 files changed, 128 insertions(+), 130 deletions(-)
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index fe20e10141..9848cb5a3e 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -144,7 +144,7 @@ pub fn main2() anyerror!void {
}
};
}
- if (builtin.zig_backend == .stage2_llvm) {
+ if (builtin.zig_backend == .stage2_llvm or builtin.zig_backend == .stage2_wasm) {
const passed = builtin.test_functions.len - skipped - failed;
const stderr = std.io.getStdErr();
writeInt(stderr, passed) catch {};
diff --git a/lib/std/start.zig b/lib/std/start.zig
index a3cc3d00a8..6e28ca61a3 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -31,7 +31,7 @@ comptime {
} else if (builtin.os.tag == .windows) {
@export(wWinMainCRTStartup2, .{ .name = "wWinMainCRTStartup" });
} else if (builtin.os.tag == .wasi and @hasDecl(root, "main")) {
- @export(wasmMain2, .{ .name = "_start" });
+ @export(wasiMain2, .{ .name = "_start" });
} else {
if (!@hasDecl(root, "_start")) {
@export(_start2, .{ .name = "_start" });
@@ -100,17 +100,17 @@ fn callMain2() noreturn {
exit2(0);
}
-fn wasmMain2() u8 {
+fn wasiMain2() noreturn {
switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
.Void => {
root.main();
- return 0;
+ std.os.wasi.proc_exit(0);
},
.Int => |info| {
if (info.bits != 8 or info.signedness == .signed) {
@compileError(bad_main_ret);
}
- return root.main();
+ std.os.wasi.proc_exit(root.main());
},
else => @compileError("Bad return type main"),
}
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index b1e7a0d3a1..9f0736f055 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -954,7 +954,7 @@ pub const DeclGen = struct {
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = extern_fn.data.owner_decl;
var func_type = try genFunctype(self.gpa, ext_decl.ty, self.target());
- func_type.deinit(self.gpa);
+ defer func_type.deinit(self.gpa);
ext_decl.fn_link.wasm.type_index = try self.bin_file.putOrGetFuncType(func_type);
return Result{ .appended = {} };
} else {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index e6988e9232..d62f3a4201 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -260,7 +260,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
}
- if (!decl.ty.hasRuntimeBits()) return;
+
assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
decl.link.wasm.clear();
@@ -297,8 +297,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, code: []const u8) !void {
if (decl.isExtern()) {
- try self.addOrUpdateImport(decl);
- return;
+ return self.addOrUpdateImport(decl);
}
if (code.len == 0) return;
@@ -407,16 +406,18 @@ pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
self.symbols.items[atom.sym_index].tag = .dead; // to ensure it does not end in the names section
for (atom.locals.items) |local_atom| {
self.symbols.items[local_atom.sym_index].tag = .dead; // also for any local symbol
+ self.symbols_free_list.append(self.base.allocator, local_atom.sym_index) catch {};
}
- atom.deinit(self.base.allocator);
if (decl.isExtern()) {
- const import = self.imports.fetchRemove(decl.link.wasm.sym_index).?.value;
+ const import = self.imports.fetchRemove(atom.sym_index).?.value;
switch (import.kind) {
.function => self.imported_functions_count -= 1,
else => unreachable,
}
}
+
+ atom.deinit(self.base.allocator);
}
/// Appends a new entry to the indirect function table
@@ -441,10 +442,13 @@ fn addOrUpdateImport(self: *Wasm, decl: *Module.Decl) !void {
switch (decl.ty.zigTypeTag()) {
.Fn => {
const gop = try self.imports.getOrPut(self.base.allocator, symbol_index);
+ const module_name = if (decl.getExternFn().?.lib_name) |lib_name| blk: {
+ break :blk std.mem.sliceTo(lib_name, 0);
+ } else self.host_name;
if (!gop.found_existing) {
self.imported_functions_count += 1;
gop.value_ptr.* = .{
- .module_name = self.host_name,
+ .module_name = module_name,
.name = std.mem.span(symbol.name),
.kind = .{ .function = decl.fn_link.wasm.type_index },
};
diff --git a/src/link/Wasm/Symbol.zig b/src/link/Wasm/Symbol.zig
index 0f2247f5d1..53d96d6382 100644
--- a/src/link/Wasm/Symbol.zig
+++ b/src/link/Wasm/Symbol.zig
@@ -142,19 +142,20 @@ pub fn format(self: Symbol, comptime fmt: []const u8, options: std.fmt.FormatOpt
_ = fmt;
_ = options;
- const kind_fmt: u8 = switch (self.kind) {
+ const kind_fmt: u8 = switch (self.tag) {
.function => 'F',
.data => 'D',
.global => 'G',
.section => 'S',
.event => 'E',
.table => 'T',
+ .dead => '-',
};
const visible: []const u8 = if (self.isVisible()) "yes" else "no";
const binding: []const u8 = if (self.isLocal()) "local" else "global";
try writer.print(
"{c} binding={s} visible={s} id={d} name={s}",
- .{ kind_fmt, binding, visible, self.index(), self.name },
+ .{ kind_fmt, binding, visible, self.index, self.name },
);
}
diff --git a/test/stage2/wasm.zig b/test/stage2/wasm.zig
index 20ff65e0a7..7c7e203b40 100644
--- a/test/stage2/wasm.zig
+++ b/test/stage2/wasm.zig
@@ -11,37 +11,31 @@ pub fn addCases(ctx: *TestContext) !void {
var case = ctx.exe("wasm function calls", wasi);
case.addCompareOutput(
- \\pub fn main() u8 {
+ \\pub fn main() void {
\\ foo();
\\ bar();
- \\ return 42;
\\}
\\fn foo() void {
\\ bar();
\\ bar();
\\}
\\fn bar() void {}
- ,
- "42\n",
- );
+ , "");
case.addCompareOutput(
- \\pub fn main() u8 {
+ \\pub fn main() void {
\\ bar();
\\ foo();
\\ foo();
\\ bar();
\\ foo();
\\ bar();
- \\ return 42;
\\}
\\fn foo() void {
\\ bar();
\\}
\\fn bar() void {}
- ,
- "42\n",
- );
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -56,23 +50,22 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\fn bar() void {}
,
- "0\n",
+ "",
);
case.addCompareOutput(
- \\pub fn main() u8 {
+ \\pub fn main() void {
\\ foo(10, 20);
- \\ return 5;
\\}
\\fn foo(x: u8, y: u8) void { _ = x; _ = y; }
- , "5\n");
+ , "");
}
{
var case = ctx.exe("wasm locals", wasi);
case.addCompareOutput(
- \\pub fn main() u8 {
+ \\pub fn main() void {
\\ var i: u8 = 5;
\\ var y: f32 = 42.0;
\\ var x: u8 = 10;
@@ -80,38 +73,38 @@ pub fn addCases(ctx: *TestContext) !void {
\\ y;
\\ x;
\\ }
- \\ return i;
+ \\ if (i != 5) unreachable;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
- \\pub fn main() u8 {
+ \\pub fn main() void {
\\ var i: u8 = 5;
\\ var y: f32 = 42.0;
\\ _ = y;
\\ var x: u8 = 10;
\\ foo(i, x);
\\ i = x;
- \\ return i;
+ \\ if (i != 10) unreachable;
\\}
\\fn foo(x: u8, y: u8) void {
\\ _ = y;
\\ var i: u8 = 10;
\\ i = x;
\\}
- , "10\n");
+ , "");
}
{
var case = ctx.exe("wasm binary operands", wasi);
case.addCompareOutput(
- \\pub fn main() u8 {
+ \\pub fn main() void {
\\ var i: u8 = 5;
\\ i += 20;
- \\ return i;
+ \\ if (i != 25) unreachable;
\\}
- , "25\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -119,7 +112,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (i +% 1 != -2147483648) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -127,34 +120,34 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (i +% 1 != 0) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var i: u8 = 255;
\\ return i +% 1;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var i: u8 = 5;
\\ i += 20;
\\ var result: u8 = foo(i, 10);
- \\ return result;
+ \\ return result - 35;
\\}
\\fn foo(x: u8, y: u8) u8 {
\\ return x + y;
\\}
- , "35\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var i: u8 = 20;
\\ i -= 5;
- \\ return i;
+ \\ return i - 15;
\\}
- , "15\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -162,7 +155,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (i -% 1 != 2147483647) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -170,26 +163,26 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (i -% 1 != 63) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
- \\pub fn main() u8 {
+ \\pub fn main() void {
\\ var i: u4 = 0;
- \\ return i -% 1;
+ \\ if(i -% 1 != 15) unreachable;
\\}
- , "15\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var i: u8 = 5;
\\ i -= 3;
\\ var result: u8 = foo(i, 10);
- \\ return result;
+ \\ return result - 8;
\\}
\\fn foo(x: u8, y: u8) u8 {
\\ return y - x;
\\}
- , "8\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -202,7 +195,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\fn foo(x: u32, y: u32) u32 {
\\ return x * y;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -211,7 +204,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (result != -2) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -219,7 +212,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (i *% 3 != 1) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -227,7 +220,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (i *% 3 != 1) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -240,31 +233,31 @@ pub fn addCases(ctx: *TestContext) !void {
\\fn foo(x: u32, y: u32) u32 {
\\ return x / y;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var i: u8 = 5;
\\ i &= 6;
- \\ return i;
+ \\ return i - 4;
\\}
- , "4\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var i: u8 = 5;
\\ i |= 6;
- \\ return i;
+ \\ return i - 7;
\\}
- , "7\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var i: u8 = 5;
\\ i ^= 6;
- \\ return i;
+ \\ return i - 3;
\\}
- , "3\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -273,7 +266,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -282,7 +275,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (!b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -291,7 +284,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (!b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -300,7 +293,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (!b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -309,7 +302,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -318,7 +311,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -327,7 +320,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -336,7 +329,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (!b) unreachable;
\\ return;
\\}
- , "0\n");
+ , "");
}
{
@@ -348,9 +341,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (i > @as(u8, 4)) {
\\ i += 10;
\\ }
- \\ return i;
+ \\ return i - 15;
\\}
- , "15\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -360,9 +353,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ } else {
\\ i = 2;
\\ }
- \\ return i;
+ \\ return i - 2;
\\}
- , "2\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -372,9 +365,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ } else if(i == @as(u8, 5)) {
\\ i = 20;
\\ }
- \\ return i;
+ \\ return i - 20;
\\}
- , "20\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -388,9 +381,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ i = 20;
\\ }
\\ }
- \\ return i;
+ \\ return i - 31;
\\}
- , "31\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -405,7 +398,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ const x = if(ok) @as(i32, 20) else @as(i32, 10);
\\ return x;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() void {
@@ -425,7 +418,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ };
\\ return val + 10;
\\}
- , "0\n");
+ , "");
}
{
@@ -438,9 +431,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ i += 1;
\\ }
\\
- \\ return i;
+ \\ return i - 5;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -449,9 +442,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var x: u8 = 1;
\\ i += x;
\\ }
- \\ return i;
+ \\ return i - 10;
\\}
- , "10\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -461,9 +454,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ i += x;
\\ if (i == @as(u8, 5)) break;
\\ }
- \\ return i;
+ \\ return i - 5;
\\}
- , "5\n");
+ , "");
}
{
@@ -485,7 +478,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ }
\\ return;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\const Number = enum { One, Two, Three };
@@ -507,7 +500,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\fn assert(val: bool) void {
\\ if(!val) unreachable;
\\}
- , "0\n");
+ , "");
}
{
@@ -518,9 +511,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\
\\pub fn main() u8 {
\\ var example: Example = .{ .x = 5 };
- \\ return example.x;
+ \\ return example.x - 5;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
\\const Example = struct { x: u8 };
@@ -528,18 +521,18 @@ pub fn addCases(ctx: *TestContext) !void {
\\pub fn main() u8 {
\\ var example: Example = .{ .x = 5 };
\\ example.x = 10;
- \\ return example.x;
+ \\ return example.x - 10;
\\}
- , "10\n");
+ , "");
case.addCompareOutput(
\\const Example = struct { x: u8, y: u8 };
\\
\\pub fn main() u8 {
\\ var example: Example = .{ .x = 5, .y = 10 };
- \\ return example.y + example.x;
+ \\ return example.y + example.x - 15;
\\}
- , "15\n");
+ , "");
case.addCompareOutput(
\\const Example = struct { x: u8, y: u8 };
@@ -549,9 +542,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var example2: Example = .{ .x = 10, .y = 20 };
\\
\\ example = example2;
- \\ return example.y + example.x;
+ \\ return example.y + example.x - 30;
\\}
- , "30\n");
+ , "");
case.addCompareOutput(
\\const Example = struct { x: u8, y: u8 };
@@ -560,9 +553,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var example: Example = .{ .x = 5, .y = 10 };
\\
\\ example = .{ .x = 10, .y = 20 };
- \\ return example.y + example.x;
+ \\ return example.y + example.x - 30;
\\}
- , "30\n");
+ , "");
}
{
@@ -578,9 +571,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ else => 5,
\\ };
\\
- \\ return a;
+ \\ return a - 2;
\\}
- , "2\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -592,9 +585,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ else => 5,
\\ };
\\
- \\ return a;
+ \\ return a - 3;
\\}
- , "3\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -606,9 +599,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ else => 5,
\\ };
\\
- \\ return a;
+ \\ return a - 5;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
\\const MyEnum = enum { One, Two, Three };
@@ -621,9 +614,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ .Three => 3,
\\ };
\\
- \\ return a;
+ \\ return a - 2;
\\}
- , "2\n");
+ , "");
}
{
@@ -641,35 +634,35 @@ pub fn addCases(ctx: *TestContext) !void {
\\fn assert(b: bool) void {
\\ if (!b) unreachable;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var e: anyerror!u8 = 5;
\\ const i = e catch 10;
- \\ return i;
+ \\ return i - 5;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var e: anyerror!u8 = error.Foo;
\\ const i = e catch 10;
- \\ return i;
+ \\ return i - 10;
\\}
- , "10\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var e = foo();
\\ const i = e catch 69;
- \\ return i;
+ \\ return i - 5;
\\}
\\
\\fn foo() anyerror!u8 {
\\ return 5;
\\}
- , "5\n");
+ , "");
}
{
@@ -679,24 +672,24 @@ pub fn addCases(ctx: *TestContext) !void {
\\pub fn main() u8 {
\\ var e = foo();
\\ const i = e catch 69;
- \\ return i;
+ \\ return i - 69;
\\}
\\
\\fn foo() anyerror!u8 {
\\ return error.Bruh;
\\}
- , "69\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var e = foo();
\\ const i = e catch 42;
- \\ return i;
+ \\ return i - 42;
\\}
\\
\\fn foo() anyerror!u8 {
\\ return error.Dab;
\\}
- , "42\n");
+ , "");
}
{
@@ -709,7 +702,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = y;
\\ return;
\\}
- , "0\n");
+ , "");
}
{
@@ -722,9 +715,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ if (x) |val| {
\\ y = val;
\\ }
- \\ return y;
+ \\ return y - 5;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -735,22 +728,22 @@ pub fn addCases(ctx: *TestContext) !void {
\\ }
\\ return y;
\\}
- , "0\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var x: ?u8 = 5;
- \\ return x.?;
+ \\ return x.? - 5;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
\\ var x: u8 = 5;
\\ var y: ?u8 = x;
- \\ return y.?;
+ \\ return y.? - 5;
\\}
- , "5\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -763,7 +756,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ }
\\ return 0;
\\}
- , "0\n");
+ , "");
}
{
@@ -774,13 +767,13 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var x: u8 = 0;
\\
\\ foo(&x);
- \\ return x;
+ \\ return x - 2;
\\}
\\
\\fn foo(x: *u8)void {
\\ x.* = 2;
\\}
- , "2\n");
+ , "");
case.addCompareOutput(
\\pub fn main() u8 {
@@ -788,7 +781,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\
\\ foo(&x);
\\ bar(&x);
- \\ return x;
+ \\ return x - 4;
\\}
\\
\\fn foo(x: *u8)void {
@@ -798,6 +791,6 @@ pub fn addCases(ctx: *TestContext) !void {
\\fn bar(x: *u8) void {
\\ x.* += 2;
\\}
- , "4\n");
+ , "");
}
}
From 9981b3fd2f7ab85146efa9feebe08a795411d131 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 8 Feb 2022 18:05:54 +0100
Subject: [PATCH 0083/2031] stage2: tiny improvements all over the place
* pass more x64 behavior tests
* return with a TODO error when lowering a decl with no runtime bits
* insert some debug logs for tracing recursive descent down the
type-value tree when lowering types
* print `Decl`'s name when print debugging `decl_ref` value
---
src/arch/wasm/CodeGen.zig | 2 ++
src/arch/x86_64/CodeGen.zig | 11 +++++++----
src/codegen.zig | 14 ++++++++++++++
src/value.zig | 5 ++++-
test/behavior/bugs/1025.zig | 1 -
test/behavior/bugs/1277.zig | 1 -
test/behavior/bugs/1310.zig | 1 -
test/behavior/bugs/1500.zig | 1 -
8 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 9f0736f055..0361d4ffdd 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -970,6 +970,8 @@ pub const DeclGen = struct {
/// Generates the wasm bytecode for the declaration belonging to `Context`
fn genTypedValue(self: *DeclGen, ty: Type, val: Value) InnerError!Result {
+ log.debug("genTypedValue: ty = {}, val = {}", .{ ty, val });
+
const writer = self.code.writer();
if (val.isUndef()) {
try writer.writeByteNTimes(0xaa, @intCast(usize, ty.abiSize(self.target())));
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index a745a6df33..9abeadad5d 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -1973,6 +1973,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
if (self.liveness.isUnused(inst)) {
return MCValue.dead;
}
+
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const struct_ty = ptr_ty.childType();
@@ -2190,6 +2191,7 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
}
fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
+ const abi_size = dst_ty.abiSize(self.target.*);
switch (dst_mcv) {
.none => unreachable,
.undef => unreachable,
@@ -2216,7 +2218,6 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
});
},
.immediate => |imm| {
- const abi_size = dst_ty.abiSize(self.target.*);
_ = try self.addInst(.{
.tag = mir_tag,
.ops = (Mir.Ops{
@@ -2226,7 +2227,11 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
});
},
.embedded_in_code, .memory => {
- return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
+ assert(abi_size <= 8);
+ self.register_manager.freezeRegs(&.{dst_reg});
+ defer self.register_manager.unfreezeRegs(&.{dst_reg});
+ const reg = try self.copyToTmpRegister(dst_ty, src_mcv);
+ return self.genBinMathOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg });
},
.got_load, .direct_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
@@ -2235,7 +2240,6 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- const abi_size = dst_ty.abiSize(self.target.*);
const adj_off = off + @intCast(i32, abi_size);
_ = try self.addInst(.{
.tag = mir_tag,
@@ -2259,7 +2263,6 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- const abi_size = dst_ty.abiSize(self.target.*);
if (abi_size > 8) {
return self.fail("TODO implement ADD/SUB/CMP for stack dst with large ABI", .{});
}
diff --git a/src/codegen.zig b/src/codegen.zig
index 059d2adc14..5873fd439c 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -150,6 +150,8 @@ pub fn generateSymbol(
const tracy = trace(@src());
defer tracy.end();
+ log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty, typed_value.val });
+
if (typed_value.val.isUndefDeep()) {
const target = bin_file.options.target;
const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
@@ -485,6 +487,18 @@ fn lowerDeclRef(
return Result{ .appended = {} };
}
+ const is_fn_body = decl.ty.zigTypeTag() == .Fn;
+ if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
+ return Result{
+ .fail = try ErrorMsg.create(
+ bin_file.allocator,
+ src_loc,
+ "TODO handle void types when lowering decl ref",
+ .{},
+ ),
+ };
+ }
+
if (decl.analysis != .complete) return error.AnalysisFail;
decl.markAlive();
const vaddr = vaddr: {
diff --git a/src/value.zig b/src/value.zig
index 23a04f2e5a..acc3fa3d74 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -711,7 +711,10 @@ pub const Value = extern union {
const decl = val.castTag(.decl_ref_mut).?.data.decl;
return out_stream.print("(decl_ref_mut '{s}')", .{decl.name});
},
- .decl_ref => return out_stream.writeAll("(decl ref)"),
+ .decl_ref => {
+ const decl = val.castTag(.decl_ref).?.data;
+ return out_stream.print("(decl ref '{s}')", .{decl.name});
+ },
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
try out_stream.print("&[{}] ", .{elem_ptr.index});
diff --git a/test/behavior/bugs/1025.zig b/test/behavior/bugs/1025.zig
index 33ceb9fedf..62d3687ddb 100644
--- a/test/behavior/bugs/1025.zig
+++ b/test/behavior/bugs/1025.zig
@@ -9,7 +9,6 @@ fn getA() A {
}
test "bug 1025" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const a = getA();
try @import("std").testing.expect(a.B == u8);
}
diff --git a/test/behavior/bugs/1277.zig b/test/behavior/bugs/1277.zig
index f6d7b91928..3b59ea36e8 100644
--- a/test/behavior/bugs/1277.zig
+++ b/test/behavior/bugs/1277.zig
@@ -13,6 +13,5 @@ fn f() i32 {
test "don't emit an LLVM global for a const function when it's in an optional in a struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try std.testing.expect(s.f.?() == 1234);
}
diff --git a/test/behavior/bugs/1310.zig b/test/behavior/bugs/1310.zig
index 40845ddb03..25509299fb 100644
--- a/test/behavior/bugs/1310.zig
+++ b/test/behavior/bugs/1310.zig
@@ -24,6 +24,5 @@ fn agent_callback(_vm: [*]VM, options: [*]u8) callconv(.C) i32 {
test "fixed" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(agent_callback(undefined, undefined) == 11);
}
diff --git a/test/behavior/bugs/1500.zig b/test/behavior/bugs/1500.zig
index eb2a06b7fb..6a41617d2b 100644
--- a/test/behavior/bugs/1500.zig
+++ b/test/behavior/bugs/1500.zig
@@ -7,7 +7,6 @@ const B = *const fn (A) void;
test "allow these dependencies" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var a: A = undefined;
var b: B = undefined;
if (false) {
From 37fea3e3ddc1f7d266d95789c3b1005291bcb96b Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Tue, 8 Feb 2022 19:12:43 +0100
Subject: [PATCH 0084/2031] wasm: Store stack-offset as WValue
Rather than performing pointer arithmetic at runtime to store the stack offset as
a pointer in a local, we now store the offset as a WValue measured from the bottom of the stack.
This has the benefit of fewer instructions, fewer locals, and less performance impact when
we allocate a value on the virtual stack.
---
src/arch/wasm/CodeGen.zig | 217 ++++++++++++++++++++++----------------
1 file changed, 126 insertions(+), 91 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 0361d4ffdd..5186f0231b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -51,6 +51,22 @@ const WValue = union(enum) {
/// In wasm function pointers are indexes into a function table,
/// rather than an address in the data section.
function_index: u32,
+ /// Offset from the bottom of the stack, with the offset
+ /// pointing to where the value lives.
+ stack_offset: u32,
+
+ /// Returns the offset from the bottom of the stack. This is useful when
+ /// we use the load or store instruction to ensure we retrieve the value
+ /// from the correct position, rather than the value that lives at the
+ /// bottom of the stack. For instances where `WValue` is not `stack_offset`
+ /// this will return 0, which allows us to simply call this function for all
+ /// loads and stores without requiring checks everywhere.
+ fn offset(self: WValue) u32 {
+ switch (self) {
+ .stack_offset => |offset| return offset,
+ else => return 0,
+ }
+ }
};
/// Wasm ops, but without input/output/signedness information
@@ -778,6 +794,7 @@ fn emitWValue(self: *Self, value: WValue) InnerError!void {
try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
},
.function_index => |index| try self.addLabel(.function_index, index), // write function index and generate relocation
+ .stack_offset => try self.addLabel(.local_get, self.bottom_stack_value.local), // caller must ensure to address the offset
}
}
@@ -1372,18 +1389,6 @@ fn restoreStackPointer(self: *Self) !void {
try self.addLabel(.global_set, 0);
}
-/// Saves the current stack size's stack pointer position into a given local
-/// It does this by retrieving the bottom stack pointer, adding `self.stack_size` and storing
-/// the result back into the local.
-fn saveStack(self: *Self) !WValue {
- const local = try self.allocLocal(Type.usize);
- try self.addLabel(.local_get, self.bottom_stack_value.local);
- try self.addImm32(@intCast(i32, self.stack_size));
- try self.addTag(.i32_add);
- try self.addLabel(.local_set, local.local);
- return local;
-}
-
/// From a given type, will create space on the virtual stack to store the value of such type.
/// This returns a `WValue` with its active tag set to `local`, containing the index to the local
/// that points to the position on the virtual stack. This function should be used instead of
@@ -1408,8 +1413,7 @@ fn allocStack(self: *Self, ty: Type) !WValue {
const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_align);
defer self.stack_size = offset + abi_size;
- // store the stack pointer and return a local to it
- return self.saveStack();
+ return WValue{ .stack_offset = offset };
}
/// From a given AIR instruction generates a pointer to the stack where
@@ -1439,8 +1443,7 @@ fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue {
const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_alignment);
defer self.stack_size = offset + abi_size;
- // store the stack pointer and return a local to it
- return self.saveStack();
+ return WValue{ .stack_offset = offset };
}
/// From given zig bitsize, returns the wasm bitsize
@@ -1458,14 +1461,16 @@ fn toWasmIntBits(bits: u16) ?u16 {
fn memCopy(self: *Self, ty: Type, lhs: WValue, rhs: WValue) !void {
const abi_size = ty.abiSize(self.target);
var offset: u32 = 0;
+ const lhs_base = lhs.offset();
+ const rhs_base = rhs.offset();
while (offset < abi_size) : (offset += 1) {
// get lhs' address to store the result
try self.emitWValue(lhs);
// load byte from rhs' adress
try self.emitWValue(rhs);
- try self.addMemArg(.i32_load8_u, .{ .offset = offset, .alignment = 1 });
+ try self.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
// store the result in lhs (we already have its address on the stack)
- try self.addMemArg(.i32_store8, .{ .offset = offset, .alignment = 1 });
+ try self.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
}
}
@@ -1533,19 +1538,19 @@ fn isByRef(ty: Type, target: std.Target) bool {
/// local value to store the pointer. This allows for local re-use and improves binary size.
fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
// do not perform arithmetic when offset is 0.
- if (offset == 0) return ptr_value;
+ if (offset == 0 and ptr_value.offset() == 0) return ptr_value;
const result_ptr: WValue = switch (action) {
.new => try self.allocLocal(Type.usize),
.modify => ptr_value,
};
try self.emitWValue(ptr_value);
- switch (self.target.cpu.arch.ptrBitWidth()) {
- 32 => {
- try self.addImm32(@bitCast(i32, @intCast(u32, offset)));
+ switch (self.arch()) {
+ .wasm32 => {
+ try self.addImm32(@bitCast(i32, @intCast(u32, offset + ptr_value.offset())));
try self.addTag(.i32_add);
},
- 64 => {
- try self.addImm64(offset);
+ .wasm64 => {
+ try self.addImm64(offset + ptr_value.offset());
try self.addTag(.i64_add);
},
else => unreachable,
@@ -1554,16 +1559,6 @@ fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64, action: enum
return result_ptr;
}
-/// Creates a new local and sets its value to the given `value` local.
-/// User must ensure `ty` matches that of given `value`.
-/// Asserts `value` is a `local`.
-fn copyLocal(self: *Self, value: WValue, ty: Type) InnerError!WValue {
- const copy = try self.allocLocal(ty);
- try self.addLabel(.local_get, value.local);
- try self.addLabel(.local_set, copy.local);
- return copy;
-}
-
fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
const air_tags = self.air.instructions.items(.tag);
return switch (air_tags[inst]) {
@@ -1789,7 +1784,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const sret = if (first_param_sret) blk: {
const sret_local = try self.allocStack(ret_ty);
- try self.emitWValue(sret_local);
+ const ptr_offset = try self.buildPointerOffset(sret_local, 0, .new);
+ try self.emitWValue(ptr_offset);
break :blk sret_local;
} else WValue{ .none = {} };
@@ -1799,7 +1795,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const arg_ty = self.air.typeOf(arg_ref);
if (!arg_ty.hasRuntimeBits()) continue;
- try self.emitWValue(arg_val);
+
+ switch (arg_val) {
+ .stack_offset => try self.emitWValue(try self.buildPointerOffset(arg_val, 0, .new)),
+ else => try self.emitWValue(arg_val),
+ }
}
if (target) |direct| {
@@ -1865,7 +1865,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
if (!pl_ty.hasRuntimeBits()) {
- return self.store(lhs, rhs, Type.initTag(.u8), 0);
+ return self.store(lhs, rhs, Type.u8, 0);
}
return self.memCopy(ty, lhs, rhs);
@@ -1891,7 +1891,13 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
else => {},
}
try self.emitWValue(lhs);
- try self.emitWValue(rhs);
+ // In this case we're actually interested in storing the stack position
+ // into lhs, so we calculate that and emit that instead
+ if (rhs == .stack_offset) {
+ try self.emitWValue(try self.buildPointerOffset(rhs, 0, .new));
+ } else {
+ try self.emitWValue(rhs);
+ }
const valtype = typeToValtype(ty, self.target);
const abi_size = @intCast(u8, ty.abiSize(self.target));
@@ -1904,7 +1910,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
// store rhs value at stack pointer's location in memory
try self.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset, .alignment = ty.abiAlignment(self.target) },
+ .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(self.target) },
);
}
@@ -1947,7 +1953,7 @@ fn load(self: *Self, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
try self.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset, .alignment = ty.abiAlignment(self.target) },
+ .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(self.target) },
);
// store the result in a local
@@ -2358,7 +2364,12 @@ fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// if operand has codegen bits we should break with a value
if (self.air.typeOf(br.operand).hasRuntimeBits()) {
- try self.emitWValue(try self.resolveInst(br.operand));
+ const operand = try self.resolveInst(br.operand);
+ const op = switch (operand) {
+ .stack_offset => try self.buildPointerOffset(operand, 0, .new),
+ else => operand,
+ };
+ try self.emitWValue(op);
if (block.value != .none) {
try self.addLabel(.local_set, block.value.local);
@@ -2381,8 +2392,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// wasm does not have booleans nor the `not` instruction, therefore compare with 0
// to create the same logic
- try self.addImm32(0);
- try self.addTag(.i32_eq);
+ try self.addTag(.i32_eqz);
// save the result in the local
const not_tmp = try self.allocLocal(Type.initTag(.i32));
@@ -2406,8 +2416,7 @@ fn airUnreachable(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airBitcast(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand = try self.resolveInst(ty_op.operand);
- return operand;
+ return self.resolveInst(ty_op.operand);
}
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2437,7 +2446,12 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u32) InnerEr
}
fn structFieldPtr(self: *Self, struct_ptr: WValue, offset: u32) InnerError!WValue {
- return self.buildPointerOffset(struct_ptr, offset, .new);
+ switch (struct_ptr) {
+ .stack_offset => |stack_offset| {
+ return WValue{ .stack_offset = stack_offset + offset };
+ },
+ else => return self.buildPointerOffset(struct_ptr, offset, .new),
+ }
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2455,7 +2469,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
};
if (isByRef(field_ty, self.target)) {
- return self.buildPointerOffset(operand, offset, .new);
+ switch (operand) {
+ .stack_offset => |stack_offset| {
+ return WValue{ .stack_offset = stack_offset + offset };
+ },
+ else => return self.buildPointerOffset(operand, offset, .new),
+ }
}
return self.load(operand, field_ty, offset);
@@ -2621,7 +2640,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W
try self.emitWValue(operand);
if (pl_ty.hasRuntimeBits()) {
try self.addMemArg(.i32_load16_u, .{
- .offset = 0,
+ .offset = operand.offset(),
.alignment = err_ty.errorUnionSet().abiAlignment(self.target),
});
}
@@ -2679,9 +2698,9 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.store(payload_ptr, operand, op_ty, 0);
// ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
- try self.addLabel(.local_get, err_union.local);
+ try self.emitWValue(err_union);
try self.addImm32(0);
- try self.addMemArg(.i32_store16, .{ .offset = 0, .alignment = 2 });
+ try self.addMemArg(.i32_store16, .{ .offset = err_union.offset(), .alignment = 2 });
return err_union;
}
@@ -2752,7 +2771,7 @@ fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode)
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
if (payload_ty.hasRuntimeBits()) {
- try self.addMemArg(.i32_load8_u, .{ .offset = 0, .alignment = 1 });
+ try self.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
}
}
@@ -2820,7 +2839,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
try self.emitWValue(operand);
try self.addImm32(1);
- try self.addMemArg(.i32_store8, .{ .offset = 0, .alignment = 1 });
+ try self.addMemArg(.i32_store8, .{ .offset = operand.offset(), .alignment = 1 });
return self.buildPointerOffset(operand, offset, .new);
}
@@ -2832,9 +2851,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const payload_ty = self.air.typeOf(ty_op.operand);
if (!payload_ty.hasRuntimeBits()) {
const non_null_bit = try self.allocStack(Type.initTag(.u1));
- try self.addLabel(.local_get, non_null_bit.local);
+ try self.emitWValue(non_null_bit);
try self.addImm32(1);
- try self.addMemArg(.i32_store8, .{ .offset = 0, .alignment = 1 });
+ try self.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
return non_null_bit;
}
@@ -2849,9 +2868,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// Create optional type, set the non-null bit, and store the operand inside the optional type
const result = try self.allocStack(op_ty);
- try self.addLabel(.local_get, result.local);
+ try self.emitWValue(result);
try self.addImm32(1);
- try self.addMemArg(.i32_store8, .{ .offset = 0, .alignment = 1 });
+ try self.addMemArg(.i32_store8, .{ .offset = result.offset(), .alignment = 1 });
const payload_ptr = try self.buildPointerOffset(result, offset, .new);
try self.store(payload_ptr, operand, payload_ty, 0);
@@ -3013,8 +3032,6 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const array_ty = self.air.typeOf(ty_op.operand).childType();
- const ty = Type.@"usize";
- const ptr_width = @intCast(u32, ty.abiSize(self.target));
const slice_ty = self.air.getRefType(ty_op.ty);
// create a slice on the stack
@@ -3022,15 +3039,12 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// store the array ptr in the slice
if (array_ty.hasRuntimeBits()) {
- try self.store(slice_local, operand, ty, 0);
+ try self.store(slice_local, operand, Type.usize, 0);
}
// store the length of the array in the slice
- const len = array_ty.arrayLen();
- try self.addImm32(@bitCast(i32, @intCast(u32, len)));
- const len_local = try self.allocLocal(ty);
- try self.addLabel(.local_set, len_local.local);
- try self.store(slice_local, len_local, ty, ptr_width);
+ const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) };
+ try self.store(slice_local, len, Type.usize, self.ptrSize());
return slice_local;
}
@@ -3038,7 +3052,13 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const un_op = self.air.instructions.items(.data)[inst].un_op;
- return self.resolveInst(un_op);
+ const operand = try self.resolveInst(un_op);
+
+ switch (operand) {
+ // for stack offset, return a pointer to this offset.
+ .stack_offset => return self.buildPointerOffset(operand, 0, .new),
+ else => return operand,
+ }
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3046,16 +3066,20 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
- const pointer = try self.resolveInst(bin_op.lhs);
+ const ptr = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
const elem_ty = ptr_ty.childType();
const elem_size = elem_ty.abiSize(self.target);
// load pointer onto the stack
if (ptr_ty.isSlice()) {
- const ptr_local = try self.load(pointer, Type.usize, 0);
+ const ptr_local = try self.load(ptr, Type.usize, 0);
try self.addLabel(.local_get, ptr_local.local);
} else {
+ const pointer = switch (ptr) {
+ .stack_offset => try self.buildPointerOffset(ptr, 0, .new),
+ else => ptr,
+ };
try self.emitWValue(pointer);
}
@@ -3089,7 +3113,11 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ptr_local = try self.load(ptr, Type.usize, 0);
try self.addLabel(.local_get, ptr_local.local);
} else {
- try self.emitWValue(ptr);
+ const pointer = switch (ptr) {
+ .stack_offset => try self.buildPointerOffset(ptr, 0, .new),
+ else => ptr,
+ };
+ try self.emitWValue(pointer);
}
// calculate index into ptr
@@ -3118,7 +3146,11 @@ fn airPtrBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
- try self.emitWValue(ptr);
+ const pointer = switch (ptr) {
+ .stack_offset => try self.buildPointerOffset(ptr, 0, .new),
+ else => ptr,
+ };
+ try self.emitWValue(pointer);
try self.emitWValue(offset);
try self.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(self.target))));
try self.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
@@ -3138,7 +3170,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const len = try self.resolveInst(bin_op.rhs);
try self.memSet(ptr, len, value);
- return WValue.none;
+ return WValue{ .none = {} };
}
/// Sets a region of memory at `ptr` to the value of `value`
@@ -3149,7 +3181,10 @@ fn memSet(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
// If not, we lower it ourselves
if (std.Target.wasm.featureSetHas(self.target.cpu.features, .bulk_memory)) {
- try self.emitWValue(ptr);
+ switch (ptr) {
+ .stack_offset => try self.emitWValue(try self.buildPointerOffset(ptr, 0, .new)),
+ else => try self.emitWValue(ptr),
+ }
try self.emitWValue(value);
try self.emitWValue(len);
try self.addExtended(.memory_fill);
@@ -3172,18 +3207,18 @@ fn memSet(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void
try self.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
try self.emitWValue(ptr);
try self.emitWValue(offset);
- switch (self.ptrSize()) {
- 4 => try self.addTag(.i32_add),
- 8 => try self.addTag(.i64_add),
+ switch (self.arch()) {
+ .wasm32 => try self.addTag(.i32_add),
+ .wasm64 => try self.addTag(.i64_add),
else => unreachable,
}
try self.emitWValue(value);
- const mem_store_op: Mir.Inst.Tag = switch (self.ptrSize()) {
- 4 => .i32_store8,
- 8 => .i64_store8,
+ const mem_store_op: Mir.Inst.Tag = switch (self.arch()) {
+ .wasm32 => .i32_store8,
+ .wasm64 => .i64_store8,
else => unreachable,
};
- try self.addMemArg(mem_store_op, .{ .offset = 0, .alignment = 1 });
+ try self.addMemArg(mem_store_op, .{ .offset = ptr.offset(), .alignment = 1 });
try self.emitWValue(offset);
try self.addImm32(1);
switch (self.ptrSize()) {
@@ -3207,14 +3242,18 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const elem_ty = array_ty.childType();
const elem_size = elem_ty.abiSize(self.target);
- // calculate index into slice
- try self.emitWValue(array);
+ const array_ptr = switch (array) {
+ .stack_offset => try self.buildPointerOffset(array, 0, .new),
+ else => array,
+ };
+
+ try self.emitWValue(array_ptr);
try self.emitWValue(index);
try self.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(elem_ty);
+ const result = try self.allocLocal(Type.usize);
try self.addLabel(.local_set, result.local);
if (isByRef(elem_ty, self.target)) {
@@ -3277,9 +3316,7 @@ fn airVectorInit(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
- const offset = try self.allocLocal(Type.usize);
- try self.emitWValue(result);
- try self.addLabel(.local_set, offset.local);
+ const offset = try self.buildPointerOffset(result, 0, .new);
for (elements) |elem, elem_index| {
const elem_val = try self.resolveInst(elem);
try self.store(offset, elem_val, elem_ty, 0);
@@ -3301,9 +3338,7 @@ fn airVectorInit(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
.Struct => {
const tuple = vector_ty.castTag(.tuple).?.data;
const result = try self.allocStack(vector_ty);
- const offset = try self.allocLocal(Type.usize); // pointer to offset
- try self.emitWValue(result);
- try self.addLabel(.local_set, offset.local);
+ const offset = try self.buildPointerOffset(result, 0, .new); // pointer to offset
for (elements) |elem, elem_index| {
if (tuple.values[elem_index].tag() != .unreachable_value) continue;
@@ -3379,10 +3414,10 @@ fn cmpBigInt(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.ma
const result = try self.allocLocal(Type.initTag(.i32));
{
try self.startBlock(.block, wasm.block_empty);
- const lhs_high_bit = try self.load(lhs, Type.initTag(.u64), 0);
- const lhs_low_bit = try self.load(lhs, Type.initTag(.u64), 8);
- const rhs_high_bit = try self.load(rhs, Type.initTag(.u64), 0);
- const rhs_low_bit = try self.load(rhs, Type.initTag(.u64), 8);
+ const lhs_high_bit = try self.load(lhs, Type.u64, 0);
+ const lhs_low_bit = try self.load(lhs, Type.u64, 8);
+ const rhs_high_bit = try self.load(rhs, Type.u64, 0);
+ const rhs_low_bit = try self.load(rhs, Type.u64, 8);
try self.emitWValue(lhs_high_bit);
try self.emitWValue(rhs_high_bit);
try self.addTag(.i64_ne);
From 6b0c950cb873f5fe65c0029140b91dfd8a7b1adb Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Tue, 8 Feb 2022 12:56:50 +0100
Subject: [PATCH 0085/2031] stage2 ARM: support all integer types in
genTypedValue
---
src/arch/arm/CodeGen.zig | 24 +++++++++++++++++-------
test/behavior/align.zig | 2 --
test/behavior/bugs/1277.zig | 1 -
test/behavior/bugs/1310.zig | 1 -
test/behavior/bugs/2006.zig | 1 -
test/behavior/cast.zig | 4 ----
test/behavior/struct.zig | 6 ------
7 files changed, 17 insertions(+), 22 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index fb473ef412..9e1e3d7f43 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -3180,7 +3180,6 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
switch (mcv) {
.dead => unreachable,
- .ptr_stack_offset => unreachable,
.ptr_embedded_in_code => unreachable,
.unreach, .none => return, // Nothing to do.
.undef => {
@@ -3194,6 +3193,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
else => return self.fail("TODO implement memset", .{}),
}
},
+ .ptr_stack_offset => {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+ },
.compare_flags_unsigned,
.compare_flags_signed,
.immediate,
@@ -3858,9 +3861,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
- return MCValue{ .memory = decl.link.macho.local_sym_index };
+ unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
@@ -3929,10 +3930,19 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
},
.Int => {
const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits > ptr_bits or info.signedness == .signed) {
- return self.fail("TODO const int bigger than ptr and signed int", .{});
+ if (info.bits <= ptr_bits) {
+ const unsigned = switch (info.signedness) {
+ .signed => blk: {
+ const signed = @intCast(i32, typed_value.val.toSignedInt());
+ break :blk @bitCast(u32, signed);
+ },
+ .unsigned => @intCast(u32, typed_value.val.toUnsignedInt()),
+ };
+
+ return MCValue{ .immediate = unsigned };
+ } else {
+ return self.lowerUnnamedConst(typed_value);
}
- return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
},
.Bool => {
return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 1044742627..96278524c0 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -93,8 +93,6 @@ test "@ptrCast preserves alignment of bigger source" {
}
test "alignstack" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try expect(fnWithAlignedStack() == 1234);
}
diff --git a/test/behavior/bugs/1277.zig b/test/behavior/bugs/1277.zig
index 3b59ea36e8..46fa1d27d4 100644
--- a/test/behavior/bugs/1277.zig
+++ b/test/behavior/bugs/1277.zig
@@ -12,6 +12,5 @@ fn f() i32 {
}
test "don't emit an LLVM global for a const function when it's in an optional in a struct" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try std.testing.expect(s.f.?() == 1234);
}
diff --git a/test/behavior/bugs/1310.zig b/test/behavior/bugs/1310.zig
index 25509299fb..1f19ec20c2 100644
--- a/test/behavior/bugs/1310.zig
+++ b/test/behavior/bugs/1310.zig
@@ -23,6 +23,5 @@ fn agent_callback(_vm: [*]VM, options: [*]u8) callconv(.C) i32 {
}
test "fixed" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(agent_callback(undefined, undefined) == 11);
}
diff --git a/test/behavior/bugs/2006.zig b/test/behavior/bugs/2006.zig
index 15f74b4485..3719271bdf 100644
--- a/test/behavior/bugs/2006.zig
+++ b/test/behavior/bugs/2006.zig
@@ -7,7 +7,6 @@ const S = struct {
};
test "bug 2006" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var a: S = undefined;
a = S{ .p = undefined };
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index d8fbc5ed9e..79f75f773c 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -35,8 +35,6 @@ fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
}
test "resolve undefined with integer" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try testResolveUndefWithInt(true, 1234);
comptime try testResolveUndefWithInt(true, 1234);
}
@@ -205,8 +203,6 @@ test "implicit cast from *[N]T to [*c]T" {
}
test "*usize to *void" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
var i = @as(usize, 0);
var v = @ptrCast(*void, &i);
v.* = {};
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index e4b64a39d3..6a0ebb5123 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -114,8 +114,6 @@ test "struct byval assign" {
}
test "call struct static method" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
const result = StructWithNoFields.add(3, 4);
try expect(result == 7);
}
@@ -193,8 +191,6 @@ test "store member function in variable" {
}
test "member functions" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
const r = MemberFnRand{ .seed = 1234 };
try expect(r.getSeed() == 1234);
}
@@ -244,8 +240,6 @@ test "call method with mutable reference to struct with no fields" {
}
test "usingnamespace within struct scope" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
const S = struct {
usingnamespace struct {
pub fn inner() i32 {
From 8fe9d2f9867101fc8d6a91c6e10c6f3b644ce6a8 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Tue, 8 Feb 2022 13:21:54 +0100
Subject: [PATCH 0086/2031] stage2 ARM: airStructFieldVal for more MCValues
---
src/arch/arm/CodeGen.zig | 11 ++++++++++-
test/behavior/struct.zig | 4 ----
2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 9e1e3d7f43..2116717cd1 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1703,9 +1703,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
const struct_field_ty = struct_ty.structFieldType(index);
const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
+ const adjusted_field_offset = struct_size - struct_field_offset - struct_field_size;
+
switch (mcv) {
+ .dead, .unreach => unreachable,
+ .stack_argument_offset => |off| {
+ break :result MCValue{ .stack_argument_offset = off + adjusted_field_offset };
+ },
.stack_offset => |off| {
- break :result MCValue{ .stack_offset = off + struct_size - struct_field_offset - struct_field_size };
+ break :result MCValue{ .stack_offset = off + adjusted_field_offset };
+ },
+ .memory => |addr| {
+ break :result MCValue{ .memory = addr + adjusted_field_offset };
},
else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 6a0ebb5123..03be28b9d1 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -174,16 +174,12 @@ const MemberFnTestFoo = struct {
};
test "call member function directly" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
const instance = MemberFnTestFoo{ .x = 1234 };
const result = MemberFnTestFoo.member(instance);
try expect(result == 1234);
}
test "store member function in variable" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
const instance = MemberFnTestFoo{ .x = 1234 };
const memberFn = MemberFnTestFoo.member;
const result = memberFn(instance);
From e42b5e76bacaf221f3da3f4ffe769f603a51cf44 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 8 Feb 2022 22:35:34 +0100
Subject: [PATCH 0087/2031] stage2: handle void type in Elf DWARF gen
Enable more behavior tests on both x64 and arm
---
src/link/Elf.zig | 4 +-
test/behavior.zig | 2 +-
test/behavior/basic.zig | 79 +++++++++++++++++++++++++++++++++++++
test/behavior/bugs/2006.zig | 1 -
test/behavior/bugs/3367.zig | 1 -
5 files changed, 83 insertions(+), 4 deletions(-)
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index ea9556a952..2a756b3347 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -3057,8 +3057,10 @@ fn addDbgInfoType(
var relocs = std.ArrayList(struct { ty: Type, reloc: u32 }).init(arena);
switch (ty.zigTypeTag()) {
- .Void => unreachable,
.NoReturn => unreachable,
+ .Void => {
+ try dbg_info_buffer.append(abbrev_pad1);
+ },
.Bool => {
try dbg_info_buffer.appendSlice(&[_]u8{
abbrev_base_type,
diff --git a/test/behavior.zig b/test/behavior.zig
index 7b6cb6b402..c15de44597 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -4,6 +4,7 @@ test {
_ = @import("behavior/align.zig");
_ = @import("behavior/alignof.zig");
_ = @import("behavior/array.zig");
+ _ = @import("behavior/basic.zig");
_ = @import("behavior/bit_shifting.zig");
_ = @import("behavior/bool.zig");
_ = @import("behavior/bugs/394.zig");
@@ -43,7 +44,6 @@ test {
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
_ = @import("behavior/array_llvm.zig");
- _ = @import("behavior/basic.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bugs/624.zig");
_ = @import("behavior/bugs/704.zig");
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 0c2cfbc3d5..5fc26d15bb 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -15,6 +15,8 @@ test "empty function with comments" {
}
test "truncate" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
try expect(testTruncate(0x10fd) == 0xfd);
comptime try expect(testTruncate(0x10fd) == 0xfd);
}
@@ -23,6 +25,9 @@ fn testTruncate(x: u32) u8 {
}
test "truncate to non-power-of-two integers" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
try testTrunc(u32, u1, 0b10101, 0b1);
try testTrunc(u32, u1, 0b10110, 0b0);
try testTrunc(u32, u2, 0b10101, 0b01);
@@ -108,14 +113,23 @@ fn first4KeysOfHomeRow() []const u8 {
}
test "return string from function" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
try expect(mem.eql(u8, first4KeysOfHomeRow(), "aoeu"));
}
test "hex escape" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello"));
}
test "multiline string" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const s1 =
\\one
\\two)
@@ -126,6 +140,9 @@ test "multiline string" {
}
test "multiline string comments at start" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const s1 =
//\\one
\\two)
@@ -136,6 +153,9 @@ test "multiline string comments at start" {
}
test "multiline string comments at end" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const s1 =
\\one
\\two)
@@ -146,6 +166,9 @@ test "multiline string comments at end" {
}
test "multiline string comments in middle" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const s1 =
\\one
//\\two)
@@ -156,6 +179,9 @@ test "multiline string comments in middle" {
}
test "multiline string comments at multiple places" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const s1 =
\\one
//\\two
@@ -172,6 +198,9 @@ test "string concatenation" {
}
test "array mult operator" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
}
@@ -195,6 +224,9 @@ test "compile time global reinterpret" {
}
test "cast undefined" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const array: [100]u8 = undefined;
const slice = @as([]const u8, &array);
testCastUndefined(slice);
@@ -204,6 +236,8 @@ fn testCastUndefined(x: []const u8) void {
}
test "implicit cast after unreachable" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
try expect(outer() == 1234);
}
fn inner() i32 {
@@ -259,6 +293,9 @@ fn fB() []const u8 {
}
test "call function pointer in struct" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(mem.eql(u8, f3(true), "a"));
@@ -282,6 +319,8 @@ const FnPtrWrapper = struct {
};
test "const ptr from var variable" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
var x: u64 = undefined;
var y: u64 = undefined;
@@ -296,6 +335,8 @@ fn copy(src: *const u64, dst: *u64) void {
}
test "call result of if else expression" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -307,6 +348,8 @@ fn f2(x: bool) []const u8 {
}
test "memcpy and memset intrinsics" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
try testMemcpyMemset();
@@ -327,6 +370,8 @@ fn testMemcpyMemset() !void {
}
test "variable is allowed to be a pointer to an opaque type" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var x: i32 = 1234;
@@ -338,6 +383,9 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
}
test "take address of parameter" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
try testTakeAddressOfParameter(12.34);
}
fn testTakeAddressOfParameter(f: f32) !void {
@@ -360,6 +408,9 @@ fn testPointerToVoidReturnType2() *const void {
}
test "array 2D const double ptr" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const rect_2d_vertexes = [_][1]f32{
@@ -376,6 +427,9 @@ fn testArray2DConstDoublePtr(ptr: *const f32) !void {
}
test "double implicit cast in same expression" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
var x = @as(i32, @as(u16, nine()));
try expect(x == 9);
}
@@ -384,6 +438,8 @@ fn nine() u8 {
}
test "struct inside function" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
try testStructInFn();
comptime try testStructInFn();
}
@@ -411,6 +467,8 @@ fn getNull() ?*i32 {
}
test "global variable assignment with optional unwrapping with var initialized to undefined" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
const S = struct {
var data: i32 = 1234;
fn foo() ?*i32 {
@@ -426,6 +484,9 @@ test "global variable assignment with optional unwrapping with var initialized t
var global_foo: *i32 = undefined;
test "peer result location with typed parent, runtime condition, comptime prongs" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const S = struct {
fn doTheTest(arg: i32) i32 {
const st = Structy{
@@ -523,6 +584,9 @@ test "self reference through fn ptr field" {
}
test "global variable initialized to global variable array element" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
try expect(global_ptr == &gdt[0]);
@@ -537,6 +601,8 @@ var gdt = [_]GDTEntry{
var global_ptr = &gdt[0];
test "global constant is loaded with a runtime-known index" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
const S = struct {
fn doTheTest() !void {
var index: usize = 1;
@@ -552,6 +618,9 @@ test "global constant is loaded with a runtime-known index" {
}
test "multiline string literal is null terminated" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const s1 =
\\one
\\two)
@@ -582,6 +651,9 @@ test "explicit cast optional pointers" {
}
test "pointer comparison" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const a = @as([]const u8, "a");
const b = &a;
try expect(ptrEql(b, b));
@@ -591,6 +663,9 @@ fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
}
test "string concatenation" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const a = "OK" ++ " IT " ++ "WORKED";
const b = "OK IT WORKED";
@@ -610,6 +685,8 @@ test "string concatenation" {
}
test "thread local variable" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
const S = struct {
@@ -634,6 +711,8 @@ fn maybe(x: bool) anyerror!?u32 {
}
test "pointer to thread local array" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
const s = "Hello world";
diff --git a/test/behavior/bugs/2006.zig b/test/behavior/bugs/2006.zig
index 3719271bdf..4d76230c88 100644
--- a/test/behavior/bugs/2006.zig
+++ b/test/behavior/bugs/2006.zig
@@ -7,7 +7,6 @@ const S = struct {
};
test "bug 2006" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var a: S = undefined;
a = S{ .p = undefined };
try expect(@sizeOf(S) != 0);
diff --git a/test/behavior/bugs/3367.zig b/test/behavior/bugs/3367.zig
index 0607263f9a..f540fdf6df 100644
--- a/test/behavior/bugs/3367.zig
+++ b/test/behavior/bugs/3367.zig
@@ -12,7 +12,6 @@ const Mixin = struct {
test "container member access usingnamespace decls" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var foo = Foo{};
foo.two();
}
From f1d2141849e0ae01910bef52cada0de0d0322e72 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 8 Feb 2022 23:48:42 +0100
Subject: [PATCH 0088/2031] stage2: handle direct and got load for stack args
---
src/arch/x86_64/CodeGen.zig | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 9abeadad5d..de8f907dea 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -3482,6 +3482,8 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
},
.memory,
.embedded_in_code,
+ .direct_load,
+ .got_load,
=> {
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
From c689df1215fbe27485eb57751a407e6d5a991ca1 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 8 Feb 2022 23:59:10 +0100
Subject: [PATCH 0089/2031] stage2: disable some tests on x86_64-macos
---
test/behavior/basic.zig | 3 +++
1 file changed, 3 insertions(+)
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 5fc26d15bb..13d7e833d5 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -194,6 +194,8 @@ test "multiline string comments at multiple places" {
}
test "string concatenation" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
try expect(mem.eql(u8, "OK" ++ " IT " ++ "WORKED", "OK IT WORKED"));
}
@@ -395,6 +397,7 @@ fn testTakeAddressOfParameter(f: f32) !void {
test "pointer to void return type" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) return error.SkipZigTest;
try testPointerToVoidReturnType();
}
From fd6c351263e99af4dca73af953711e6d1b57f4e4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 17:22:11 -0700
Subject: [PATCH 0090/2031] cmake: fix -DZIG_SINGLE_THREADED option
It was still passing --single-threaded instead of -fsingle-threaded.
---
CMakeLists.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e12e040fe4..8fd1960518 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -834,7 +834,7 @@ else()
set(ZIG1_RELEASE_ARG -OReleaseFast --strip)
endif()
if(ZIG_SINGLE_THREADED)
- set(ZIG1_SINGLE_THREADED_ARG "--single-threaded")
+ set(ZIG1_SINGLE_THREADED_ARG "-fsingle-threaded")
else()
set(ZIG1_SINGLE_THREADED_ARG "")
endif()
From 5a00e249632716b86edac088f69d19d82e307a28 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 17:26:55 -0700
Subject: [PATCH 0091/2031] std.Progress: make the API infallible
by handling `error.TimerUnsupported`. In this case, only explicit calls
to refresh() will cause the progress line to be printed.
---
lib/std/Progress.zig | 32 ++++++++++++++++++--------------
lib/std/special/test_runner.zig | 5 +----
src/Compilation.zig | 2 +-
src/stage1.zig | 5 +----
4 files changed, 21 insertions(+), 23 deletions(-)
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index 682171366b..24b66c1162 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -35,7 +35,7 @@ root: Node = undefined,
/// Keeps track of how much time has passed since the beginning.
/// Used to compare with `initial_delay_ms` and `refresh_rate_ms`.
-timer: std.time.Timer = undefined,
+timer: ?std.time.Timer = null,
/// When the previous refresh was written to the terminal.
/// Used to compare with `refresh_rate_ms`.
@@ -139,7 +139,7 @@ pub const Node = struct {
/// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this
/// API to return Progress rather than accept it as a parameter.
/// `estimated_total_items` value of 0 means unknown.
-pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) !*Node {
+pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) *Node {
const stderr = std.io.getStdErr();
self.terminal = null;
if (stderr.supportsAnsiEscapeCodes()) {
@@ -161,22 +161,24 @@ pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) !*
};
self.columns_written = 0;
self.prev_refresh_timestamp = 0;
- self.timer = try std.time.Timer.start();
+ self.timer = std.time.Timer.start() catch null;
self.done = false;
return &self.root;
}
/// Updates the terminal if enough time has passed since last update. Thread-safe.
pub fn maybeRefresh(self: *Progress) void {
- const now = self.timer.read();
- if (now < self.initial_delay_ns) return;
- if (!self.update_mutex.tryLock()) return;
- defer self.update_mutex.unlock();
- // TODO I have observed this to happen sometimes. I think we need to follow Rust's
- // lead and guarantee monotonically increasing times in the std lib itself.
- if (now < self.prev_refresh_timestamp) return;
- if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return;
- return self.refreshWithHeldLock();
+ if (self.timer) |*timer| {
+ const now = timer.read();
+ if (now < self.initial_delay_ns) return;
+ if (!self.update_mutex.tryLock()) return;
+ defer self.update_mutex.unlock();
+ // TODO I have observed this to happen sometimes. I think we need to follow Rust's
+ // lead and guarantee monotonically increasing times in the std lib itself.
+ if (now < self.prev_refresh_timestamp) return;
+ if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return;
+ return self.refreshWithHeldLock();
+ }
}
/// Updates the terminal and resets `self.next_refresh_timestamp`. Thread-safe.
@@ -285,7 +287,9 @@ fn refreshWithHeldLock(self: *Progress) void {
// Stop trying to write to this file once it errors.
self.terminal = null;
};
- self.prev_refresh_timestamp = self.timer.read();
+ if (self.timer) |*timer| {
+ self.prev_refresh_timestamp = timer.read();
+ }
}
pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void {
@@ -327,7 +331,7 @@ test "basic functionality" {
return error.SkipZigTest;
}
var progress = Progress{};
- const root_node = try progress.start("", 100);
+ const root_node = progress.start("", 100);
defer root_node.end();
const sub_task_names = [_][]const u8{
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 9848cb5a3e..fb00a9dc30 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -34,10 +34,7 @@ pub fn main() void {
var progress = std.Progress{
.dont_print_on_dumb = true,
};
- const root_node = progress.start("Test", test_fn_list.len) catch |err| switch (err) {
- // TODO still run tests in this case
- error.TimerUnsupported => @panic("timer unsupported"),
- };
+ const root_node = progress.start("Test", test_fn_list.len);
const have_tty = progress.terminal != null and progress.supports_ansi_escape_codes;
var async_frame_buffer: []align(std.Target.stack_align) u8 = undefined;
diff --git a/src/Compilation.zig b/src/Compilation.zig
index f07a7c9dd7..bd7581863b 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2586,7 +2586,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
// If the terminal is dumb, we dont want to show the user all the
// output.
var progress: std.Progress = .{ .dont_print_on_dumb = true };
- var main_progress_node = try progress.start("", 0);
+ var main_progress_node = progress.start("", 0);
defer main_progress_node.end();
if (self.color == .off) progress.terminal = null;
diff --git a/src/stage1.zig b/src/stage1.zig
index b716a9c954..005dc312ba 100644
--- a/src/stage1.zig
+++ b/src/stage1.zig
@@ -305,10 +305,7 @@ export fn stage2_progress_start_root(
name_len: usize,
estimated_total_items: usize,
) *std.Progress.Node {
- return progress.start(
- name_ptr[0..name_len],
- estimated_total_items,
- ) catch @panic("timer unsupported");
+ return progress.start(name_ptr[0..name_len], estimated_total_items);
}
// ABI warning
From 846eb701821a3f2af514bbad770478e3276b2d89 Mon Sep 17 00:00:00 2001
From: Hadrien Dorio
Date: Fri, 4 Feb 2022 10:27:18 +0100
Subject: [PATCH 0092/2031] ci: azure: split build-and-test step
replace the .bat script by a pwsh script
---
CMakeLists.txt | 2 +-
ci/azure/pipelines.yml | 132 ++++++++++++++++++++++++++-----
ci/azure/windows_msvc_install | 16 ----
ci/azure/windows_msvc_script.bat | 39 ---------
ci/azure/windows_upload | 46 -----------
5 files changed, 115 insertions(+), 120 deletions(-)
delete mode 100644 ci/azure/windows_msvc_install
delete mode 100644 ci/azure/windows_msvc_script.bat
delete mode 100755 ci/azure/windows_upload
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8fd1960518..533e03383f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -63,7 +63,7 @@ if("${ZIG_VERSION}" STREQUAL "")
endif()
endif()
endif()
-message("Configuring zig version ${ZIG_VERSION}")
+message(STATUS "Configuring zig version ${ZIG_VERSION}")
set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not compatible with glibc)")
set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries")
diff --git a/ci/azure/pipelines.yml b/ci/azure/pipelines.yml
index 4524ee9fb1..f0df4558c2 100644
--- a/ci/azure/pipelines.yml
+++ b/ci/azure/pipelines.yml
@@ -10,6 +10,7 @@ jobs:
- script: ci/azure/macos_script
name: main
displayName: 'Build and test'
+
- job: BuildMacOS_arm64
pool:
vmImage: 'macOS-10.15'
@@ -21,30 +22,125 @@ jobs:
- script: ci/azure/macos_arm64_script
name: main
displayName: 'Build'
+
- job: BuildWindows
+ timeoutInMinutes: 360
pool:
vmImage: 'windows-2019'
- timeoutInMinutes: 360
+ variables:
+ LLVM_CLANG_LLD_URL: 'https://ziglang.org/deps/llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt.tar.xz'
+ LLVM_CLANG_LLD_DIR: 'llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt'
steps:
- - powershell: |
- (New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2022-01-28/msys2-base-x86_64-20220128.sfx.exe", "sfx.exe")
- .\sfx.exe -y -o\
- displayName: Download/Extract/Install MSYS2
- - script: |
- @REM install updated filesystem package first without dependency checking
- @REM because of: https://github.com/msys2/MSYS2-packages/issues/2021
- %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Sydd filesystem"
- displayName: Workaround filesystem dash MSYS2 dependency issue
- - script: |
- %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu"
- %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu"
- displayName: Update MSYS2
+ - pwsh: |
+ (New-Object Net.WebClient).DownloadFile("$(LLVM_CLANG_LLD_URL)", "${LLVM_CLANG_LLD_DIR}.tar.xz")
+ & 'C:\Program Files\7-Zip\7z.exe' x "${LLVM_CLANG_LLD_DIR}.tar.xz"
+ & 'C:\Program Files\7-Zip\7z.exe' x "${LLVM_CLANG_LLD_DIR}.tar"
+ name: install
+ displayName: 'Install LLVM/CLANG/LLD'
+
+ - pwsh: |
+ Set-Variable -Name ZIGBUILDDIR -Value "$(Get-Location)\build"
+ Set-Variable -Name ZIGINSTALLDIR -Value "$ZIGBUILDDIR\dist"
+ Set-Variable -Name ZIGPREFIXPATH -Value "$(Get-Location)\$(LLVM_CLANG_LLD_DIR)"
+
+ # Make the `zig version` number consistent.
+ # This will affect the cmake command below.
+ git config core.abbrev 9
+ git fetch --tags
+
+ mkdir $ZIGBUILDDIR
+ cd $ZIGBUILDDIR
+ & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
+ cmake .. `
+ -Thost=x64 `
+ -G "Visual Studio 16 2019" `
+ -A x64 `
+ -DCMAKE_INSTALL_PREFIX="$ZIGINSTALLDIR" `
+ -DCMAKE_PREFIX_PATH="$ZIGPREFIXPATH" `
+ -DCMAKE_BUILD_TYPE=Release `
+ -DZIG_OMIT_STAGE2=ON 2> out-null
+ & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\MSBuild\Current\Bin\MSBuild.exe" `
+ /maxcpucount /p:Configuration=Release INSTALL.vcxproj
+ name: build
+ displayName: 'Build'
+
+ - pwsh: |
+ Set-Variable -Name ZIGINSTALLDIR -Value "$(Get-Location)\build\dist"
+
+ # Sadly, stage2 is omitted from this build to save memory on the CI server. Once self-hosted is
+ # built with itself and does not gobble as much memory, we can enable these tests.
+ #& "$ZIGINSTALLDIR\bin\zig.exe" test "..\test\behavior.zig" -fno-stage1 -fLLVM -I "..\test" 2>&1
+
+ & "$ZIGINSTALLDIR\bin\zig.exe" build test-toolchain -Dskip-non-native -Dskip-stage2-tests 2>&1
+ & "$ZIGINSTALLDIR\bin\zig.exe" build test-std -Dskip-non-native 2>&1
+ name: test
+ displayName: 'Test'
+
+ - pwsh: |
+ Set-Variable -Name ZIGINSTALLDIR -Value "$(Get-Location)\build\dist"
+
+ & "$ZIGINSTALLDIR\bin\zig.exe" build docs
+ timeoutInMinutes: 60
+ name: doc
+ displayName: 'Documentation'
+
- task: DownloadSecureFile@1
inputs:
- secureFile: s3cfg
- - script: ci/azure/windows_msvc_script.bat
- name: main
- displayName: 'Build and test'
+ name: aws_credentials
+ secureFile: aws_credentials
+
+ - pwsh: |
+ Set-Variable -Name ZIGBUILDDIR -Value "$(Get-Location)\build"
+ $Env:AWS_SHARED_CREDENTIALS_FILE = "$(aws_credentials.secureFilePath)"
+
+ cd "$ZIGBUILDDIR"
+ mv ../LICENSE dist/
+ mv ../zig-cache/langref.html dist/
+ mv dist/bin/zig.exe dist/
+ rmdir dist/bin
+
+ # Remove the unnecessary zig dir in $prefix/lib/zig/std/std.zig
+ mv dist/lib/zig dist/lib2
+ rmdir dist/lib
+ mv dist/lib2 dist/lib
+
+ Set-Variable -Name VERSION -Value $(./dist/zig.exe version)
+ Set-Variable -Name DIRNAME -Value "zig-windows-x86_64-$VERSION"
+ Set-Variable -Name TARBALL -Value "$DIRNAME.zip"
+ mv dist "$DIRNAME"
+ 7z a "$TARBALL" "$DIRNAME"
+
+ aws s3 cp `
+ "$TARBALL" `
+ s3://ziglang.org/builds/ `
+ --cache-control 'public, max-age=31536000, immutable'
+
+ Set-Variable -Name SHASUM -Value (Get-FileHash "$TARBALL" -Algorithm SHA256 | select-object -ExpandProperty Hash)
+ Set-Variable -Name BYTESIZE -Value (Get-Item "$TARBALL").length
+
+ Set-Variable -Name JSONFILE -Value "windows-${GITBRANCH}.json"
+ echo $null > $JSONFILE
+ echo ('{"tarball": "' + $TARBALL + '",') >> $JSONFILE
+ echo ('"shasum": "' + $SHASUM + '",') >> $JSONFILE
+ echo ('"size": ' + $BYTESIZE + '}' ) >> $JSONFILE
+
+ aws s3 cp `
+ "$JSONFILE" `
+ s3://ziglang.org/builds/ `
+ --cache-control 'max-age=0, must-revalidate'
+
+ aws s3 cp `
+ "$JSONFILE" `
+ "s3://ziglang.org/builds/x86_64-windows-${VERSION}.json"
+
+ echo "##vso[task.setvariable variable=tarball;isOutput=true]$TARBALL"
+ echo "##vso[task.setvariable variable=shasum;isOutput=true]$SHASUM"
+ echo "##vso[task.setvariable variable=bytesize;isOutput=true]$BYTESIZE"
+
+ name: upload
+ condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))
+ displayName: 'Upload'
+
- job: OnMasterSuccess
dependsOn:
- BuildMacOS
diff --git a/ci/azure/windows_msvc_install b/ci/azure/windows_msvc_install
deleted file mode 100644
index 2df445fe12..0000000000
--- a/ci/azure/windows_msvc_install
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-
-set -x
-set -e
-
-pacman -Suy --needed --noconfirm
-pacman -S --needed --noconfirm wget p7zip python3-pip tar xz
-
-TARBALL="llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt.tar.xz"
-
-pip install s3cmd
-wget -nv "https://ziglang.org/deps/$TARBALL"
-# If the first extraction fails, re-try it once; this can happen if the tarball
-# contains symlinks that are in the table of contents before the files that
-# they point to.
-tar -xf $TARBALL || tar --overwrite -xf $TARBALL
diff --git a/ci/azure/windows_msvc_script.bat b/ci/azure/windows_msvc_script.bat
deleted file mode 100644
index c61c88093c..0000000000
--- a/ci/azure/windows_msvc_script.bat
+++ /dev/null
@@ -1,39 +0,0 @@
-@echo on
-SET "SRCROOT=%cd%"
-SET "PREVPATH=%PATH%"
-SET "PREVMSYSTEM=%MSYSTEM%"
-
-set "PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem"
-SET "MSYSTEM=MINGW64"
-bash -lc "cd ${SRCROOT} && ci/azure/windows_msvc_install" || exit /b
-SET "PATH=%PREVPATH%"
-SET "MSYSTEM=%PREVMSYSTEM%"
-
-SET "ZIGBUILDDIR=%SRCROOT%\build"
-SET "ZIGINSTALLDIR=%ZIGBUILDDIR%\dist"
-SET "ZIGPREFIXPATH=%SRCROOT%\llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt"
-
-call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
-
-REM Make the `zig version` number consistent.
-REM This will affect the cmake command below.
-git.exe config core.abbrev 9
-git.exe fetch --unshallow
-git.exe fetch --tags
-
-mkdir %ZIGBUILDDIR%
-cd %ZIGBUILDDIR%
-cmake.exe .. -Thost=x64 -G"Visual Studio 16 2019" -A x64 "-DCMAKE_INSTALL_PREFIX=%ZIGINSTALLDIR%" "-DCMAKE_PREFIX_PATH=%ZIGPREFIXPATH%" -DCMAKE_BUILD_TYPE=Release -DZIG_OMIT_STAGE2=ON || exit /b
-msbuild /maxcpucount /p:Configuration=Release INSTALL.vcxproj || exit /b
-
-REM Sadly, stage2 is omitted from this build to save memory on the CI server. Once self-hosted is
-REM built with itself and does not gobble as much memory, we can enable these tests.
-REM "%ZIGINSTALLDIR%\bin\zig.exe" test "..\test\behavior.zig" -fno-stage1 -fLLVM -I "..\test" || exit /b
-
-"%ZIGINSTALLDIR%\bin\zig.exe" build test-toolchain -Dskip-non-native -Dskip-stage2-tests || exit /b
-"%ZIGINSTALLDIR%\bin\zig.exe" build test-std -Dskip-non-native || exit /b
-"%ZIGINSTALLDIR%\bin\zig.exe" build docs || exit /b
-
-set "PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem"
-SET "MSYSTEM=MINGW64"
-bash -lc "cd ${SRCROOT} && ci/azure/windows_upload" || exit /b
diff --git a/ci/azure/windows_upload b/ci/azure/windows_upload
deleted file mode 100755
index 9c5e07e5f9..0000000000
--- a/ci/azure/windows_upload
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh
-
-set -x
-set -e
-
-if [ "${BUILD_REASON}" != "PullRequest" ]; then
- cd "$ZIGBUILDDIR"
-
- mv ../LICENSE dist/
- mv ../zig-cache/langref.html dist/
- mv dist/bin/zig.exe dist/
- rmdir dist/bin
-
- # Remove the unnecessary zig dir in $prefix/lib/zig/std/std.zig
- mv dist/lib/zig dist/lib2
- rmdir dist/lib
- mv dist/lib2 dist/lib
-
- VERSION=$(dist/zig.exe version)
- DIRNAME="zig-windows-x86_64-$VERSION"
- TARBALL="$DIRNAME.zip"
- mv dist "$DIRNAME"
- 7z a "$TARBALL" "$DIRNAME"
-
- # mv "$DOWNLOADSECUREFILE_SECUREFILEPATH" "$HOME/.s3cfg"
- s3cmd -c "$DOWNLOADSECUREFILE_SECUREFILEPATH" put -P --add-header="cache-control: public, max-age=31536000, immutable" "$TARBALL" s3://ziglang.org/builds/
-
- SHASUM=$(sha256sum $TARBALL | cut '-d ' -f1)
- BYTESIZE=$(wc -c < $TARBALL)
-
- JSONFILE="windows-$GITBRANCH.json"
- touch $JSONFILE
- echo "{\"tarball\": \"$TARBALL\"," >>$JSONFILE
- echo "\"shasum\": \"$SHASUM\"," >>$JSONFILE
- echo "\"size\": \"$BYTESIZE\"}" >>$JSONFILE
-
- s3cmd -c "$DOWNLOADSECUREFILE_SECUREFILEPATH" put -P --add-header="Cache-Control: max-age=0, must-revalidate" "$JSONFILE" "s3://ziglang.org/builds/$JSONFILE"
- s3cmd -c "$DOWNLOADSECUREFILE_SECUREFILEPATH" put -P "$JSONFILE" "s3://ziglang.org/builds/x86_64-windows-$VERSION.json"
-
- # `set -x` causes these variables to be mangled.
- # See https://developercommunity.visualstudio.com/content/problem/375679/pipeline-variable-incorrectly-inserts-single-quote.html
- set +x
- echo "##vso[task.setvariable variable=tarball;isOutput=true]$TARBALL"
- echo "##vso[task.setvariable variable=shasum;isOutput=true]$SHASUM"
- echo "##vso[task.setvariable variable=bytesize;isOutput=true]$BYTESIZE"
-fi
From 210ee1067b06c14692432b7887077003e52b2137 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 17:49:40 -0700
Subject: [PATCH 0093/2031] update more API usage of std.Progress
fixes regression introduced in 5a00e249632716b86edac088f69d19d82e307a28
---
doc/docgen.zig | 2 +-
src/test.zig | 2 +-
tools/update-license-headers.zig | 2 +-
tools/update_cpu_features.zig | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 760ebb71d3..3dd58a012d 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -1196,7 +1196,7 @@ fn genHtml(
do_code_tests: bool,
) !void {
var progress = Progress{};
- const root_node = try progress.start("Generating docgen examples", toc.nodes.len);
+ const root_node = progress.start("Generating docgen examples", toc.nodes.len);
defer root_node.end();
var env_map = try process.getEnvMap(allocator);
diff --git a/src/test.zig b/src/test.zig
index b73e11d7f5..a540d566eb 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -614,7 +614,7 @@ pub const TestContext = struct {
const host = try std.zig.system.NativeTargetInfo.detect(std.testing.allocator, .{});
var progress = std.Progress{};
- const root_node = try progress.start("compiler", self.cases.items.len);
+ const root_node = progress.start("compiler", self.cases.items.len);
defer root_node.end();
var zig_lib_directory = try introspect.findZigLibDir(std.testing.allocator);
diff --git a/tools/update-license-headers.zig b/tools/update-license-headers.zig
index 4e415784f8..b2aed7ccdb 100644
--- a/tools/update-license-headers.zig
+++ b/tools/update-license-headers.zig
@@ -6,7 +6,7 @@ const new_header = "";
pub fn main() !void {
var progress = std.Progress{};
- const root_node = try progress.start("", 0);
+ const root_node = progress.start("", 0);
defer root_node.end();
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index 73c05d8cf1..933d268e2b 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -801,7 +801,7 @@ pub fn main() anyerror!void {
defer zig_src_dir.close();
var progress = std.Progress{};
- const root_progress = try progress.start("", llvm_targets.len);
+ const root_progress = progress.start("", llvm_targets.len);
defer root_progress.end();
if (builtin.single_threaded) {
From 7c1061784b8b126633cfe84f46280f7bf72beffc Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 20:03:17 -0700
Subject: [PATCH 0094/2031] stage2: fix inferred comptime constant locals
`const` declarations inside comptime blocks were not getting properly
evaluated at compile-time. To accomplish this there is a new ZIR
instruction, `alloc_inferred_comptime`. Actually we already had one
named that, but it got renamed to `alloc_inferred_comptime_mut` to match
the naming convention with the other similar instructions.
---
src/AstGen.zig | 25 +++++++++++++-----
src/Sema.zig | 20 ++++++++++-----
src/Zir.zig | 14 +++++++---
src/print_zir.zig | 3 ++-
test/behavior.zig | 2 --
test/behavior/null.zig | 48 +++++++++++++++++++++++++++++++++++
test/behavior/null_llvm.zig | 36 --------------------------
test/behavior/null_stage1.zig | 37 ---------------------------
8 files changed, 91 insertions(+), 94 deletions(-)
delete mode 100644 test/behavior/null_llvm.zig
delete mode 100644 test/behavior/null_stage1.zig
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 5f6d05b7f5..9bc10f25e8 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2084,10 +2084,11 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.param_anytype_comptime,
.alloc,
.alloc_mut,
- .alloc_comptime,
+ .alloc_comptime_mut,
.alloc_inferred,
.alloc_inferred_mut,
.alloc_inferred_comptime,
+ .alloc_inferred_comptime_mut,
.array_cat,
.array_mul,
.array_type,
@@ -2613,7 +2614,7 @@ fn varDecl(
.type_inst = type_inst,
.align_inst = align_inst,
.is_const = true,
- .is_comptime = false,
+ .is_comptime = gz.force_comptime,
});
init_scope.instructions_top = gz.instructions.items.len;
}
@@ -2621,14 +2622,18 @@ fn varDecl(
} else {
const alloc = if (align_inst == .none) alloc: {
init_scope.instructions_top = gz.instructions.items.len;
- break :alloc try init_scope.addNode(.alloc_inferred, node);
+ const tag: Zir.Inst.Tag = if (gz.force_comptime)
+ .alloc_inferred_comptime
+ else
+ .alloc_inferred;
+ break :alloc try init_scope.addNode(tag, node);
} else alloc: {
const ref = try gz.addAllocExtended(.{
.node = node,
.type_inst = .none,
.align_inst = align_inst,
.is_const = true,
- .is_comptime = false,
+ .is_comptime = gz.force_comptime,
});
init_scope.instructions_top = gz.instructions.items.len;
break :alloc ref;
@@ -2716,7 +2721,10 @@ fn varDecl(
const type_inst = try typeExpr(gz, scope, var_decl.ast.type_node);
const alloc = alloc: {
if (align_inst == .none) {
- const tag: Zir.Inst.Tag = if (is_comptime) .alloc_comptime else .alloc_mut;
+ const tag: Zir.Inst.Tag = if (is_comptime)
+ .alloc_comptime_mut
+ else
+ .alloc_mut;
break :alloc try gz.addUnNode(tag, type_inst, node);
} else {
break :alloc try gz.addAllocExtended(.{
@@ -2732,7 +2740,10 @@ fn varDecl(
} else a: {
const alloc = alloc: {
if (align_inst == .none) {
- const tag: Zir.Inst.Tag = if (is_comptime) .alloc_inferred_comptime else .alloc_inferred_mut;
+ const tag: Zir.Inst.Tag = if (is_comptime)
+ .alloc_inferred_comptime_mut
+ else
+ .alloc_inferred_mut;
break :alloc try gz.addNode(tag, node);
} else {
break :alloc try gz.addAllocExtended(.{
@@ -5441,7 +5452,7 @@ fn forExpr(
const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr);
const index_ptr = blk: {
- const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime else .alloc;
+ const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
// initialize to zero
_ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
diff --git a/src/Sema.zig b/src/Sema.zig
index 72dfb4420b..69b9adc54b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -584,9 +584,10 @@ fn analyzeBodyInner(
.alloc => try sema.zirAlloc(block, inst),
.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)),
.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)),
- .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst),
+ .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_const)),
+ .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_mut)),
.alloc_mut => try sema.zirAllocMut(block, inst),
- .alloc_comptime => try sema.zirAllocComptime(block, inst),
+ .alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
.anyframe_type => try sema.zirAnyframeType(block, inst),
.array_cat => try sema.zirArrayCat(block, inst),
.array_mul => try sema.zirArrayMul(block, inst),
@@ -2368,12 +2369,16 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src);
}
-fn zirAllocInferredComptime(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirAllocInferredComptime(
+ sema: *Sema,
+ inst: Zir.Inst.Index,
+ inferred_alloc_ty: Type,
+) CompileError!Air.Inst.Ref {
const src_node = sema.code.instructions.items(.data)[inst].node;
const src: LazySrcLoc = .{ .node_offset = src_node };
sema.src = src;
return sema.addConstant(
- Type.initTag(.inferred_alloc_mut),
+ inferred_alloc_ty,
try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
);
}
@@ -2480,6 +2485,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const final_elem_ty = try decl.ty.copy(sema.arena);
const final_ptr_ty = try Type.ptr(sema.arena, .{
.pointee_type = final_elem_ty,
+ .mutable = var_is_mut,
.@"align" = iac.data.alignment,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
@@ -2500,9 +2506,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const peer_inst_list = inferred_alloc.data.stored_inst_list.items;
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);
- try sema.requireRuntimeBlock(block, src);
- try sema.resolveTypeLayout(block, ty_src, final_elem_ty);
-
const final_ptr_ty = try Type.ptr(sema.arena, .{
.pointee_type = final_elem_ty,
.mutable = var_is_mut,
@@ -2564,6 +2567,9 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
return;
}
+ try sema.requireRuntimeBlock(block, src);
+ try sema.resolveTypeLayout(block, ty_src, final_elem_ty);
+
// Change it to a normal alloc.
sema.air_instructions.set(ptr_inst, .{
.tag = .alloc,
diff --git a/src/Zir.zig b/src/Zir.zig
index dbdaa0f5fb..b7e3e60916 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -909,14 +909,18 @@ pub const Inst = struct {
/// Allocates comptime-mutable memory.
/// Uses the `un_node` union field. The operand is the type of the allocated object.
/// The node source location points to a var decl node.
- alloc_comptime,
+ alloc_comptime_mut,
/// Same as `alloc` except the type is inferred.
/// Uses the `node` union field.
alloc_inferred,
/// Same as `alloc_inferred` except mutable.
alloc_inferred_mut,
- /// Same as `alloc_comptime` except the type is inferred.
+ /// Allocates comptime const memory.
+ /// Uses the `node` union field. The type of the allocated object is inferred.
+ /// The node source location points to a var decl node.
alloc_inferred_comptime,
+ /// Same as `alloc_comptime_mut` except the type is inferred.
+ alloc_inferred_comptime_mut,
/// Each `store_to_inferred_ptr` puts the type of the stored value into a set,
/// and then `resolve_inferred_alloc` triggers peer type resolution on the set.
/// The operand is a `alloc_inferred` or `alloc_inferred_mut` instruction, which
@@ -957,10 +961,11 @@ pub const Inst = struct {
.add_sat,
.alloc,
.alloc_mut,
- .alloc_comptime,
+ .alloc_comptime_mut,
.alloc_inferred,
.alloc_inferred_mut,
.alloc_inferred_comptime,
+ .alloc_inferred_comptime_mut,
.array_cat,
.array_mul,
.array_type,
@@ -1446,10 +1451,11 @@ pub const Inst = struct {
.alloc = .un_node,
.alloc_mut = .un_node,
- .alloc_comptime = .un_node,
+ .alloc_comptime_mut = .un_node,
.alloc_inferred = .node,
.alloc_inferred_mut = .node,
.alloc_inferred_comptime = .node,
+ .alloc_inferred_comptime_mut = .node,
.resolve_inferred_alloc = .un_node,
.@"resume" = .un_node,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 1954772e37..9c79ad1a37 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -155,7 +155,7 @@ const Writer = struct {
.alloc,
.alloc_mut,
- .alloc_comptime,
+ .alloc_comptime_mut,
.indexable_ptr_len,
.anyframe_type,
.bit_not,
@@ -401,6 +401,7 @@ const Writer = struct {
.alloc_inferred,
.alloc_inferred_mut,
.alloc_inferred_comptime,
+ .alloc_inferred_comptime_mut,
=> try self.writeNode(stream, inst),
.error_value,
diff --git a/test/behavior.zig b/test/behavior.zig
index c15de44597..c177dd8634 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -105,7 +105,6 @@ test {
_ = @import("behavior/math.zig");
_ = @import("behavior/maximum_minimum.zig");
_ = @import("behavior/merge_error_sets.zig");
- _ = @import("behavior/null_llvm.zig");
_ = @import("behavior/popcount.zig");
_ = @import("behavior/saturating_arithmetic.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
@@ -156,7 +155,6 @@ test {
_ = @import("behavior/ir_block_deps.zig");
_ = @import("behavior/misc.zig");
_ = @import("behavior/muladd.zig");
- _ = @import("behavior/null_stage1.zig");
_ = @import("behavior/optional_stage1.zig");
_ = @import("behavior/popcount_stage1.zig");
_ = @import("behavior/reflection.zig");
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index 861921d39c..35ecafff80 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -1,3 +1,4 @@
+const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
@@ -140,3 +141,50 @@ const Particle = struct {
c: u64,
d: u64,
};
+
+test "null literal outside function" {
+ const is_null = here_is_a_null_literal.context == null;
+ try expect(is_null);
+
+ const is_non_null = here_is_a_null_literal.context != null;
+ try expect(!is_non_null);
+}
+
+const SillyStruct = struct {
+ context: ?i32,
+};
+
+const here_is_a_null_literal = SillyStruct{ .context = null };
+
+test "unwrap optional which is field of global var" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ struct_with_optional.field = null;
+ if (struct_with_optional.field) |payload| {
+ _ = payload;
+ unreachable;
+ }
+ struct_with_optional.field = 1234;
+ if (struct_with_optional.field) |payload| {
+ try expect(payload == 1234);
+ } else {
+ unreachable;
+ }
+}
+const StructWithOptional = struct {
+ field: ?i32,
+};
+
+var struct_with_optional: StructWithOptional = undefined;
+
+test "optional types" {
+ comptime {
+ const opt_type_struct = StructWithOptionalType{ .t = u8 };
+ try expect(opt_type_struct.t != null and opt_type_struct.t.? == u8);
+ }
+}
+
+const StructWithOptionalType = struct {
+ t: ?type,
+};
diff --git a/test/behavior/null_llvm.zig b/test/behavior/null_llvm.zig
deleted file mode 100644
index 25d3c0ca1c..0000000000
--- a/test/behavior/null_llvm.zig
+++ /dev/null
@@ -1,36 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-
-test "null literal outside function" {
- const is_null = here_is_a_null_literal.context == null;
- try expect(is_null);
-
- const is_non_null = here_is_a_null_literal.context != null;
- try expect(!is_non_null);
-}
-
-const SillyStruct = struct {
- context: ?i32,
-};
-
-const here_is_a_null_literal = SillyStruct{ .context = null };
-
-const StructWithOptional = struct {
- field: ?i32,
-};
-
-var struct_with_optional: StructWithOptional = undefined;
-
-test "unwrap optional which is field of global var" {
- struct_with_optional.field = null;
- if (struct_with_optional.field) |payload| {
- _ = payload;
- unreachable;
- }
- struct_with_optional.field = 1234;
- if (struct_with_optional.field) |payload| {
- try expect(payload == 1234);
- } else {
- unreachable;
- }
-}
diff --git a/test/behavior/null_stage1.zig b/test/behavior/null_stage1.zig
deleted file mode 100644
index 2b8feea242..0000000000
--- a/test/behavior/null_stage1.zig
+++ /dev/null
@@ -1,37 +0,0 @@
-const expect = @import("std").testing.expect;
-
-test "if var maybe pointer" {
- try expect(shouldBeAPlus1(Particle{
- .a = 14,
- .b = 1,
- .c = 1,
- .d = 1,
- }) == 15);
-}
-fn shouldBeAPlus1(p: Particle) u64 {
- var maybe_particle: ?Particle = p;
- if (maybe_particle) |*particle| {
- particle.a += 1;
- }
- if (maybe_particle) |particle| {
- return particle.a;
- }
- return 0;
-}
-const Particle = struct {
- a: u64,
- b: u64,
- c: u64,
- d: u64,
-};
-
-test "optional types" {
- comptime {
- const opt_type_struct = StructWithOptionalType{ .t = u8 };
- try expect(opt_type_struct.t != null and opt_type_struct.t.? == u8);
- }
-}
-
-const StructWithOptionalType = struct {
- t: ?type,
-};
From 61ed4fe07ad660f3cb4372937ccd5188ce404a44 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 7 Feb 2022 14:32:24 -0700
Subject: [PATCH 0095/2031] stage1: fix x86 i128 C ABI for extern structs
closes #10445
---
src/stage1/analyze.cpp | 13 +++++++++++--
test/stage1/c_abi/cfuncs.c | 38 ++++++++++++++++++++++++++++++++++++++
test/stage1/c_abi/main.zig | 27 +++++++++++++++++++++++++++
3 files changed, 76 insertions(+), 2 deletions(-)
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index 0dcf1fcc06..116a205df2 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -8684,14 +8684,23 @@ static Error resolve_llvm_c_abi_type(CodeGen *g, ZigType *ty) {
if (ty->data.structure.fields[i]->offset >= 8) {
eightbyte_index = 1;
}
- X64CABIClass field_class = type_c_abi_x86_64_class(g, ty->data.structure.fields[i]->type_entry);
+ ZigType *field_ty = ty->data.structure.fields[i]->type_entry;
+ X64CABIClass field_class = type_c_abi_x86_64_class(g, field_ty);
if (field_class == X64CABIClass_INTEGER) {
type_classes[eightbyte_index] = X64CABIClass_INTEGER;
} else if (type_classes[eightbyte_index] == X64CABIClass_Unknown) {
type_classes[eightbyte_index] = field_class;
}
- type_sizes[eightbyte_index] += ty->data.structure.fields[i]->type_entry->abi_size;
+ if (field_ty->abi_size > 8) {
+ assert(eightbyte_index == 0);
+ type_sizes[0] = 8;
+ type_sizes[1] = field_ty->abi_size - 8;
+ type_classes[1] = type_classes[0];
+ eightbyte_index = 1;
+ } else {
+ type_sizes[eightbyte_index] += field_ty->abi_size;
+ }
}
LLVMTypeRef return_elem_types[] = {
diff --git a/test/stage1/c_abi/cfuncs.c b/test/stage1/c_abi/cfuncs.c
index a2a5895ab4..28009e6fc7 100644
--- a/test/stage1/c_abi/cfuncs.c
+++ b/test/stage1/c_abi/cfuncs.c
@@ -11,14 +11,26 @@ static void assert_or_panic(bool ok) {
}
}
+struct i128 {
+ __int128 value;
+};
+
+struct u128 {
+ unsigned __int128 value;
+};
+
void zig_u8(uint8_t);
void zig_u16(uint16_t);
void zig_u32(uint32_t);
void zig_u64(uint64_t);
+void zig_u128(unsigned __int128);
+void zig_struct_u128(struct u128);
void zig_i8(int8_t);
void zig_i16(int16_t);
void zig_i32(int32_t);
void zig_i64(int64_t);
+void zig_i128(__int128);
+void zig_struct_i128(struct i128);
void zig_five_integers(int32_t, int32_t, int32_t, int32_t, int32_t);
void zig_f32(float);
@@ -130,11 +142,21 @@ void run_c_tests(void) {
zig_u16(0xfffe);
zig_u32(0xfffffffd);
zig_u64(0xfffffffffffffffc);
+ zig_u128(0xfffffffffffffffc);
+ {
+ struct u128 s = {0xfffffffffffffffc};
+ zig_struct_u128(s);
+ }
zig_i8(-1);
zig_i16(-2);
zig_i32(-3);
zig_i64(-4);
+ zig_i128(-5);
+ {
+ struct i128 s = {-6};
+ zig_struct_i128(s);
+ }
zig_five_integers(12, 34, 56, 78, 90);
zig_f32(12.34f);
@@ -221,6 +243,14 @@ void c_u64(uint64_t x) {
assert_or_panic(x == 0xfffffffffffffffcULL);
}
+void c_u128(unsigned __int128 x) {
+ assert_or_panic(x == 0xfffffffffffffffcULL);
+}
+
+void c_struct_u128(struct u128 x) {
+ assert_or_panic(x.value == 0xfffffffffffffffcULL);
+}
+
void c_i8(int8_t x) {
assert_or_panic(x == -1);
}
@@ -237,6 +267,14 @@ void c_i64(int64_t x) {
assert_or_panic(x == -4);
}
+void c_i128(__int128 x) {
+ assert_or_panic(x == -5);
+}
+
+void c_struct_i128(struct i128 x) {
+ assert_or_panic(x.value == -6);
+}
+
void c_f32(float x) {
assert_or_panic(x == 12.34f);
}
diff --git a/test/stage1/c_abi/main.zig b/test/stage1/c_abi/main.zig
index abd9be4922..132f440dfc 100644
--- a/test/stage1/c_abi/main.zig
+++ b/test/stage1/c_abi/main.zig
@@ -16,10 +16,14 @@ extern fn c_u8(u8) void;
extern fn c_u16(u16) void;
extern fn c_u32(u32) void;
extern fn c_u64(u64) void;
+extern fn c_u128(u128) void;
+extern fn c_struct_u128(U128) void;
extern fn c_i8(i8) void;
extern fn c_i16(i16) void;
extern fn c_i32(i32) void;
extern fn c_i64(i64) void;
+extern fn c_i128(i128) void;
+extern fn c_struct_i128(I128) void;
// On windows x64, the first 4 are passed via registers, others on the stack.
extern fn c_five_integers(i32, i32, i32, i32, i32) void;
@@ -37,11 +41,15 @@ test "C ABI integers" {
c_u16(0xfffe);
c_u32(0xfffffffd);
c_u64(0xfffffffffffffffc);
+ c_u128(0xfffffffffffffffc);
+ c_struct_u128(.{ .value = 0xfffffffffffffffc });
c_i8(-1);
c_i16(-2);
c_i32(-3);
c_i64(-4);
+ c_i128(-5);
+ c_struct_i128(.{ .value = -6 });
c_five_integers(12, 34, 56, 78, 90);
}
@@ -57,6 +65,9 @@ export fn zig_u32(x: u32) void {
export fn zig_u64(x: u64) void {
expect(x == 0xfffffffffffffffc) catch @panic("test failure");
}
+export fn zig_u128(x: u128) void {
+ expect(x == 0xfffffffffffffffc) catch @panic("test failure");
+}
export fn zig_i8(x: i8) void {
expect(x == -1) catch @panic("test failure");
}
@@ -69,6 +80,22 @@ export fn zig_i32(x: i32) void {
export fn zig_i64(x: i64) void {
expect(x == -4) catch @panic("test failure");
}
+export fn zig_i128(x: i128) void {
+ expect(x == -5) catch @panic("test failure");
+}
+
+const I128 = extern struct {
+ value: i128,
+};
+const U128 = extern struct {
+ value: u128,
+};
+export fn zig_struct_i128(a: I128) void {
+ expect(a.value == -6) catch @panic("test failure");
+}
+export fn zig_struct_u128(a: U128) void {
+ expect(a.value == 0xfffffffffffffffc) catch @panic("test failure");
+}
extern fn c_f32(f32) void;
extern fn c_f64(f64) void;
From e06ac9537e94cf4863de7ca64112d7a63af60008 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 21:10:29 -0700
Subject: [PATCH 0096/2031] stage2: fix x86_64-windows C ABI
It didn't return the integer class for pointers, and it also incorrectly
returned the memory class for some optionals.
---
src/arch/x86_64/abi.zig | 30 +++++++++++++++++++++++++++---
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index a6c71d398f..b0ab1acefd 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -18,10 +18,34 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
else => return .memory,
}
return switch (ty.zigTypeTag()) {
- .Int, .Bool, .Enum, .Void, .NoReturn, .ErrorSet, .Struct, .Union => .integer,
- .Optional => if (ty.isPtrLikeOptional()) return .integer else return .memory,
+ .Pointer,
+ .Int,
+ .Bool,
+ .Enum,
+ .Void,
+ .NoReturn,
+ .ErrorSet,
+ .Struct,
+ .Union,
+ .Optional,
+ .Array,
+ .ErrorUnion,
+ .AnyFrame,
+ .Frame,
+ => .integer,
+
.Float, .Vector => .sse,
- else => unreachable,
+
+ .Type,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .BoundFn,
+ .Fn,
+ .Opaque,
+ .EnumLiteral,
+ => unreachable,
};
}
From a67893b0e124b95f5e1fade0245fef7ebb28b190 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 21:11:53 -0700
Subject: [PATCH 0097/2031] stage1: fix x86_64-windows C ABI classification
logic
16 bytes vectors are special cased because compiler-rt currently relies
on this.
---
src/stage1/analyze.cpp | 64 +++++++++++++++++++++++++++++++++---------
1 file changed, 50 insertions(+), 14 deletions(-)
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index 116a205df2..ff925f265f 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -8279,23 +8279,53 @@ Error file_fetch(CodeGen *g, Buf *resolved_path, Buf *contents_buf) {
static X64CABIClass type_windows_abi_x86_64_class(CodeGen *g, ZigType *ty, size_t ty_size) {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
+ switch (ty_size) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ case 16:
+ return (ty->id == ZigTypeIdVector) ? X64CABIClass_SSE : X64CABIClass_MEMORY;
+ default:
+ return X64CABIClass_MEMORY;
+ }
switch (ty->id) {
- case ZigTypeIdEnum:
+ case ZigTypeIdInvalid:
+ case ZigTypeIdMetaType:
+ case ZigTypeIdComptimeFloat:
+ case ZigTypeIdComptimeInt:
+ case ZigTypeIdNull:
+ case ZigTypeIdUndefined:
+ case ZigTypeIdBoundFn:
+ case ZigTypeIdOpaque:
+ case ZigTypeIdEnumLiteral:
+ zig_unreachable();
+
+ case ZigTypeIdFn:
+ case ZigTypeIdPointer:
case ZigTypeIdInt:
case ZigTypeIdBool:
+ case ZigTypeIdEnum:
+ case ZigTypeIdVoid:
+ case ZigTypeIdUnreachable:
+ case ZigTypeIdErrorSet:
+ case ZigTypeIdErrorUnion:
+ case ZigTypeIdStruct:
+ case ZigTypeIdUnion:
+ case ZigTypeIdOptional:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return X64CABIClass_INTEGER;
+
case ZigTypeIdFloat:
case ZigTypeIdVector:
return X64CABIClass_SSE;
- case ZigTypeIdStruct:
- case ZigTypeIdUnion: {
- if (ty_size <= 8)
- return X64CABIClass_INTEGER;
- return X64CABIClass_MEMORY;
- }
- default:
+
+ case ZigTypeIdArray:
return X64CABIClass_Unknown;
}
+ zig_unreachable();
}
static X64CABIClass type_system_V_abi_x86_64_class(CodeGen *g, ZigType *ty, size_t ty_size) {
@@ -8374,17 +8404,19 @@ static X64CABIClass type_system_V_abi_x86_64_class(CodeGen *g, ZigType *ty, size
X64CABIClass type_c_abi_x86_64_class(CodeGen *g, ZigType *ty) {
Error err;
-
const size_t ty_size = type_size(g, ty);
+
+ if (g->zig_target->os == OsWindows || g->zig_target->os == OsUefi) {
+ return type_windows_abi_x86_64_class(g, ty, ty_size);
+ }
+
ZigType *ptr_type;
if ((err = get_codegen_ptr_type(g, ty, &ptr_type))) return X64CABIClass_Unknown;
if (ptr_type != nullptr)
return X64CABIClass_INTEGER;
- if (g->zig_target->os == OsWindows || g->zig_target->os == OsUefi) {
- return type_windows_abi_x86_64_class(g, ty, ty_size);
- } else if (g->zig_target->arch == ZigLLVM_aarch64 ||
- g->zig_target->arch == ZigLLVM_aarch64_be)
+ if (g->zig_target->arch == ZigLLVM_aarch64 ||
+ g->zig_target->arch == ZigLLVM_aarch64_be)
{
X64CABIClass result = type_system_V_abi_x86_64_class(g, ty, ty_size);
return (result == X64CABIClass_MEMORY) ? X64CABIClass_MEMORY_nobyval : result;
@@ -8989,8 +9021,12 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
struct_type->data.structure.llvm_full_type_queue_index = SIZE_MAX;
}
- if (struct_type->abi_size <= 16 && (struct_type->data.structure.layout == ContainerLayoutExtern || struct_type->data.structure.layout == ContainerLayoutPacked))
+ if (struct_type->abi_size <= 16 &&
+ (struct_type->data.structure.layout == ContainerLayoutExtern ||
+ struct_type->data.structure.layout == ContainerLayoutPacked))
+ {
resolve_llvm_c_abi_type(g, struct_type);
+ }
}
// This is to be used instead of void for debug info types, to avoid tripping
From a35eb9fe8ae95353155423901089cbc03d036bf3 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 21:12:32 -0700
Subject: [PATCH 0098/2031] c_abi tests: allow passing standard target options
---
test/stage1/c_abi/build.zig | 3 +++
1 file changed, 3 insertions(+)
diff --git a/test/stage1/c_abi/build.zig b/test/stage1/c_abi/build.zig
index cf21d403f7..b9151f6dda 100644
--- a/test/stage1/c_abi/build.zig
+++ b/test/stage1/c_abi/build.zig
@@ -2,15 +2,18 @@ const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
const rel_opts = b.standardReleaseOptions();
+ const target = b.standardTargetOptions(.{});
const c_obj = b.addObject("cfuncs", null);
c_obj.addCSourceFile("cfuncs.c", &[_][]const u8{"-std=c99"});
c_obj.setBuildMode(rel_opts);
c_obj.linkSystemLibrary("c");
+ c_obj.target = target;
const main = b.addTest("main.zig");
main.setBuildMode(rel_opts);
main.addObject(c_obj);
+ main.target = target;
const test_step = b.step("test", "Test the program");
test_step.dependOn(&main.step);
From 08808701d2b70de41535bb35e4724c8279ea07b7 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 21:12:50 -0700
Subject: [PATCH 0099/2031] C ABI tests: give a clue in addition to "test
failure"
---
test/stage1/c_abi/main.zig | 72 +++++++++++++++++++-------------------
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/test/stage1/c_abi/main.zig b/test/stage1/c_abi/main.zig
index 132f440dfc..bd3fec15a8 100644
--- a/test/stage1/c_abi/main.zig
+++ b/test/stage1/c_abi/main.zig
@@ -29,11 +29,11 @@ extern fn c_struct_i128(I128) void;
extern fn c_five_integers(i32, i32, i32, i32, i32) void;
export fn zig_five_integers(a: i32, b: i32, c: i32, d: i32, e: i32) void {
- expect(a == 12) catch @panic("test failure");
- expect(b == 34) catch @panic("test failure");
- expect(c == 56) catch @panic("test failure");
- expect(d == 78) catch @panic("test failure");
- expect(e == 90) catch @panic("test failure");
+ expect(a == 12) catch @panic("test failure: zig_five_integers 12");
+ expect(b == 34) catch @panic("test failure: zig_five_integers 34");
+ expect(c == 56) catch @panic("test failure: zig_five_integers 56");
+ expect(d == 78) catch @panic("test failure: zig_five_integers 78");
+ expect(e == 90) catch @panic("test failure: zig_five_integers 90");
}
test "C ABI integers" {
@@ -54,34 +54,34 @@ test "C ABI integers" {
}
export fn zig_u8(x: u8) void {
- expect(x == 0xff) catch @panic("test failure");
+ expect(x == 0xff) catch @panic("test failure: zig_u8");
}
export fn zig_u16(x: u16) void {
- expect(x == 0xfffe) catch @panic("test failure");
+ expect(x == 0xfffe) catch @panic("test failure: zig_u16");
}
export fn zig_u32(x: u32) void {
- expect(x == 0xfffffffd) catch @panic("test failure");
+ expect(x == 0xfffffffd) catch @panic("test failure: zig_u32");
}
export fn zig_u64(x: u64) void {
- expect(x == 0xfffffffffffffffc) catch @panic("test failure");
+ expect(x == 0xfffffffffffffffc) catch @panic("test failure: zig_u64");
}
export fn zig_u128(x: u128) void {
- expect(x == 0xfffffffffffffffc) catch @panic("test failure");
+ expect(x == 0xfffffffffffffffc) catch @panic("test failure: zig_u128");
}
export fn zig_i8(x: i8) void {
- expect(x == -1) catch @panic("test failure");
+ expect(x == -1) catch @panic("test failure: zig_i8");
}
export fn zig_i16(x: i16) void {
- expect(x == -2) catch @panic("test failure");
+ expect(x == -2) catch @panic("test failure: zig_i16");
}
export fn zig_i32(x: i32) void {
- expect(x == -3) catch @panic("test failure");
+ expect(x == -3) catch @panic("test failure: zig_i32");
}
export fn zig_i64(x: i64) void {
- expect(x == -4) catch @panic("test failure");
+ expect(x == -4) catch @panic("test failure: zig_i64");
}
export fn zig_i128(x: i128) void {
- expect(x == -5) catch @panic("test failure");
+ expect(x == -5) catch @panic("test failure: zig_i128");
}
const I128 = extern struct {
@@ -91,10 +91,10 @@ const U128 = extern struct {
value: u128,
};
export fn zig_struct_i128(a: I128) void {
- expect(a.value == -6) catch @panic("test failure");
+ expect(a.value == -6) catch @panic("test failure: zig_struct_i128");
}
export fn zig_struct_u128(a: U128) void {
- expect(a.value == 0xfffffffffffffffc) catch @panic("test failure");
+ expect(a.value == 0xfffffffffffffffc) catch @panic("test failure: zig_struct_u128");
}
extern fn c_f32(f32) void;
@@ -104,11 +104,11 @@ extern fn c_f64(f64) void;
extern fn c_five_floats(f32, f32, f32, f32, f32) void;
export fn zig_five_floats(a: f32, b: f32, c: f32, d: f32, e: f32) void {
- expect(a == 1.0) catch @panic("test failure");
- expect(b == 2.0) catch @panic("test failure");
- expect(c == 3.0) catch @panic("test failure");
- expect(d == 4.0) catch @panic("test failure");
- expect(e == 5.0) catch @panic("test failure");
+ expect(a == 1.0) catch @panic("test failure: zig_five_floats 1.0");
+ expect(b == 2.0) catch @panic("test failure: zig_five_floats 2.0");
+ expect(c == 3.0) catch @panic("test failure: zig_five_floats 3.0");
+ expect(d == 4.0) catch @panic("test failure: zig_five_floats 4.0");
+ expect(e == 5.0) catch @panic("test failure: zig_five_floats 5.0");
}
test "C ABI floats" {
@@ -118,10 +118,10 @@ test "C ABI floats" {
}
export fn zig_f32(x: f32) void {
- expect(x == 12.34) catch @panic("test failure");
+ expect(x == 12.34) catch @panic("test failure: zig_f32");
}
export fn zig_f64(x: f64) void {
- expect(x == 56.78) catch @panic("test failure");
+ expect(x == 56.78) catch @panic("test failure: zig_f64");
}
extern fn c_ptr(*anyopaque) void;
@@ -131,7 +131,7 @@ test "C ABI pointer" {
}
export fn zig_ptr(x: *anyopaque) void {
- expect(@ptrToInt(x) == 0xdeadbeef) catch @panic("test failure");
+ expect(@ptrToInt(x) == 0xdeadbeef) catch @panic("test failure: zig_ptr");
}
extern fn c_bool(bool) void;
@@ -141,7 +141,7 @@ test "C ABI bool" {
}
export fn zig_bool(x: bool) void {
- expect(x) catch @panic("test failure");
+ expect(x) catch @panic("test failure: zig_bool");
}
const BigStruct = extern struct {
@@ -165,11 +165,11 @@ test "C ABI big struct" {
}
export fn zig_big_struct(x: BigStruct) void {
- expect(x.a == 1) catch @panic("test failure");
- expect(x.b == 2) catch @panic("test failure");
- expect(x.c == 3) catch @panic("test failure");
- expect(x.d == 4) catch @panic("test failure");
- expect(x.e == 5) catch @panic("test failure");
+ expect(x.a == 1) catch @panic("test failure: zig_big_struct 1");
+ expect(x.b == 2) catch @panic("test failure: zig_big_struct 2");
+ expect(x.c == 3) catch @panic("test failure: zig_big_struct 3");
+ expect(x.d == 4) catch @panic("test failure: zig_big_struct 4");
+ expect(x.e == 5) catch @panic("test failure: zig_big_struct 5");
}
const BigUnion = extern union {
@@ -191,11 +191,11 @@ test "C ABI big union" {
}
export fn zig_big_union(x: BigUnion) void {
- expect(x.a.a == 1) catch @panic("test failure");
- expect(x.a.b == 2) catch @panic("test failure");
- expect(x.a.c == 3) catch @panic("test failure");
- expect(x.a.d == 4) catch @panic("test failure");
- expect(x.a.e == 5) catch @panic("test failure");
+ expect(x.a.a == 1) catch @panic("test failure: zig_big_union a");
+ expect(x.a.b == 2) catch @panic("test failure: zig_big_union b");
+ expect(x.a.c == 3) catch @panic("test failure: zig_big_union c");
+ expect(x.a.d == 4) catch @panic("test failure: zig_big_union d");
+ expect(x.a.e == 5) catch @panic("test failure: zig_big_union e");
}
const MedStructMixed = extern struct {
From 77a6031edbd17dfdf654f24d139f27ac7adeb82f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 21:15:00 -0700
Subject: [PATCH 0100/2031] C ABI: these tests are not passing yet on Windows
I was too greedy
---
test/stage1/c_abi/cfuncs.c | 12 ------------
test/stage1/c_abi/main.zig | 10 ----------
2 files changed, 22 deletions(-)
diff --git a/test/stage1/c_abi/cfuncs.c b/test/stage1/c_abi/cfuncs.c
index 28009e6fc7..f5c90adba0 100644
--- a/test/stage1/c_abi/cfuncs.c
+++ b/test/stage1/c_abi/cfuncs.c
@@ -23,13 +23,11 @@ void zig_u8(uint8_t);
void zig_u16(uint16_t);
void zig_u32(uint32_t);
void zig_u64(uint64_t);
-void zig_u128(unsigned __int128);
void zig_struct_u128(struct u128);
void zig_i8(int8_t);
void zig_i16(int16_t);
void zig_i32(int32_t);
void zig_i64(int64_t);
-void zig_i128(__int128);
void zig_struct_i128(struct i128);
void zig_five_integers(int32_t, int32_t, int32_t, int32_t, int32_t);
@@ -142,7 +140,6 @@ void run_c_tests(void) {
zig_u16(0xfffe);
zig_u32(0xfffffffd);
zig_u64(0xfffffffffffffffc);
- zig_u128(0xfffffffffffffffc);
{
struct u128 s = {0xfffffffffffffffc};
zig_struct_u128(s);
@@ -152,7 +149,6 @@ void run_c_tests(void) {
zig_i16(-2);
zig_i32(-3);
zig_i64(-4);
- zig_i128(-5);
{
struct i128 s = {-6};
zig_struct_i128(s);
@@ -243,10 +239,6 @@ void c_u64(uint64_t x) {
assert_or_panic(x == 0xfffffffffffffffcULL);
}
-void c_u128(unsigned __int128 x) {
- assert_or_panic(x == 0xfffffffffffffffcULL);
-}
-
void c_struct_u128(struct u128 x) {
assert_or_panic(x.value == 0xfffffffffffffffcULL);
}
@@ -267,10 +259,6 @@ void c_i64(int64_t x) {
assert_or_panic(x == -4);
}
-void c_i128(__int128 x) {
- assert_or_panic(x == -5);
-}
-
void c_struct_i128(struct i128 x) {
assert_or_panic(x.value == -6);
}
diff --git a/test/stage1/c_abi/main.zig b/test/stage1/c_abi/main.zig
index bd3fec15a8..71a53bedea 100644
--- a/test/stage1/c_abi/main.zig
+++ b/test/stage1/c_abi/main.zig
@@ -16,13 +16,11 @@ extern fn c_u8(u8) void;
extern fn c_u16(u16) void;
extern fn c_u32(u32) void;
extern fn c_u64(u64) void;
-extern fn c_u128(u128) void;
extern fn c_struct_u128(U128) void;
extern fn c_i8(i8) void;
extern fn c_i16(i16) void;
extern fn c_i32(i32) void;
extern fn c_i64(i64) void;
-extern fn c_i128(i128) void;
extern fn c_struct_i128(I128) void;
// On windows x64, the first 4 are passed via registers, others on the stack.
@@ -41,14 +39,12 @@ test "C ABI integers" {
c_u16(0xfffe);
c_u32(0xfffffffd);
c_u64(0xfffffffffffffffc);
- c_u128(0xfffffffffffffffc);
c_struct_u128(.{ .value = 0xfffffffffffffffc });
c_i8(-1);
c_i16(-2);
c_i32(-3);
c_i64(-4);
- c_i128(-5);
c_struct_i128(.{ .value = -6 });
c_five_integers(12, 34, 56, 78, 90);
}
@@ -65,9 +61,6 @@ export fn zig_u32(x: u32) void {
export fn zig_u64(x: u64) void {
expect(x == 0xfffffffffffffffc) catch @panic("test failure: zig_u64");
}
-export fn zig_u128(x: u128) void {
- expect(x == 0xfffffffffffffffc) catch @panic("test failure: zig_u128");
-}
export fn zig_i8(x: i8) void {
expect(x == -1) catch @panic("test failure: zig_i8");
}
@@ -80,9 +73,6 @@ export fn zig_i32(x: i32) void {
export fn zig_i64(x: i64) void {
expect(x == -4) catch @panic("test failure: zig_i64");
}
-export fn zig_i128(x: i128) void {
- expect(x == -5) catch @panic("test failure: zig_i128");
-}
const I128 = extern struct {
value: i128,
From 470c8ca48c32d34da06e4741b8f81b6eb0d72fd7 Mon Sep 17 00:00:00 2001
From: billzez <77312308+billzez@users.noreply.github.com>
Date: Tue, 8 Feb 2022 23:35:48 -0500
Subject: [PATCH 0101/2031] update RwLock to use static initialization (#10838)
---
lib/std/Thread.zig | 1 +
lib/std/Thread/RwLock.zig | 77 +++++++--------------------------------
2 files changed, 14 insertions(+), 64 deletions(-)
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 182f7ccb6b..1e54053146 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -16,6 +16,7 @@ pub const StaticResetEvent = @import("Thread/StaticResetEvent.zig");
pub const Mutex = @import("Thread/Mutex.zig");
pub const Semaphore = @import("Thread/Semaphore.zig");
pub const Condition = @import("Thread/Condition.zig");
+pub const RwLock = @import("Thread/RwLock.zig");
pub const use_pthreads = target.os.tag != .windows and target.os.tag != .wasi and builtin.link_libc;
const is_gnu = target.abi.isGnu();
diff --git a/lib/std/Thread/RwLock.zig b/lib/std/Thread/RwLock.zig
index cfe06c76e8..6cce7d1217 100644
--- a/lib/std/Thread/RwLock.zig
+++ b/lib/std/Thread/RwLock.zig
@@ -3,15 +3,12 @@
//! This API requires being initialized at runtime, and initialization
//! can fail. Once initialized, the core operations cannot fail.
-impl: Impl,
+impl: Impl = .{},
const RwLock = @This();
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
-const Mutex = std.Thread.Mutex;
-const Semaphore = std.Semaphore;
-const CondVar = std.CondVar;
pub const Impl = if (builtin.single_threaded)
SingleThreadedRwLock
@@ -20,14 +17,6 @@ else if (std.Thread.use_pthreads)
else
DefaultRwLock;
-pub fn init(rwl: *RwLock) void {
- return rwl.impl.init();
-}
-
-pub fn deinit(rwl: *RwLock) void {
- return rwl.impl.deinit();
-}
-
/// Attempts to obtain exclusive lock ownership.
/// Returns `true` if the lock is obtained, `false` otherwise.
pub fn tryLock(rwl: *RwLock) bool {
@@ -64,20 +53,8 @@ pub fn unlockShared(rwl: *RwLock) void {
/// Single-threaded applications use this for deadlock checks in
/// debug mode, and no-ops in release modes.
pub const SingleThreadedRwLock = struct {
- state: enum { unlocked, locked_exclusive, locked_shared },
- shared_count: usize,
-
- pub fn init(rwl: *SingleThreadedRwLock) void {
- rwl.* = .{
- .state = .unlocked,
- .shared_count = 0,
- };
- }
-
- pub fn deinit(rwl: *SingleThreadedRwLock) void {
- assert(rwl.state == .unlocked);
- assert(rwl.shared_count == 0);
- }
+ state: enum { unlocked, locked_exclusive, locked_shared } = .unlocked,
+ shared_count: usize = 0,
/// Attempts to obtain exclusive lock ownership.
/// Returns `true` if the lock is obtained, `false` otherwise.
@@ -152,55 +129,41 @@ pub const SingleThreadedRwLock = struct {
};
pub const PthreadRwLock = struct {
- rwlock: pthread_rwlock_t,
-
- pub fn init(rwl: *PthreadRwLock) void {
- rwl.* = .{ .rwlock = .{} };
- }
-
- pub fn deinit(rwl: *PthreadRwLock) void {
- const safe_rc: std.os.E = switch (builtin.os.tag) {
- .dragonfly, .netbsd => .AGAIN,
- else => .SUCCESS,
- };
- const rc = std.c.pthread_rwlock_destroy(&rwl.rwlock);
- assert(rc == .SUCCESS or rc == safe_rc);
- rwl.* = undefined;
- }
+ rwlock: std.c.pthread_rwlock_t = .{},
pub fn tryLock(rwl: *PthreadRwLock) bool {
- return pthread_rwlock_trywrlock(&rwl.rwlock) == .SUCCESS;
+ return std.c.pthread_rwlock_trywrlock(&rwl.rwlock) == .SUCCESS;
}
pub fn lock(rwl: *PthreadRwLock) void {
- const rc = pthread_rwlock_wrlock(&rwl.rwlock);
+ const rc = std.c.pthread_rwlock_wrlock(&rwl.rwlock);
assert(rc == .SUCCESS);
}
pub fn unlock(rwl: *PthreadRwLock) void {
- const rc = pthread_rwlock_unlock(&rwl.rwlock);
+ const rc = std.c.pthread_rwlock_unlock(&rwl.rwlock);
assert(rc == .SUCCESS);
}
pub fn tryLockShared(rwl: *PthreadRwLock) bool {
- return pthread_rwlock_tryrdlock(&rwl.rwlock) == .SUCCESS;
+ return std.c.pthread_rwlock_tryrdlock(&rwl.rwlock) == .SUCCESS;
}
pub fn lockShared(rwl: *PthreadRwLock) void {
- const rc = pthread_rwlock_rdlock(&rwl.rwlock);
+ const rc = std.c.pthread_rwlock_rdlock(&rwl.rwlock);
assert(rc == .SUCCESS);
}
pub fn unlockShared(rwl: *PthreadRwLock) void {
- const rc = pthread_rwlock_unlock(&rwl.rwlock);
+ const rc = std.c.pthread_rwlock_unlock(&rwl.rwlock);
assert(rc == .SUCCESS);
}
};
pub const DefaultRwLock = struct {
- state: usize,
- mutex: Mutex,
- semaphore: Semaphore,
+ state: usize = 0,
+ mutex: std.Thread.Mutex = .{},
+ semaphore: std.Thread.Semaphore = .{},
const IS_WRITING: usize = 1;
const WRITER: usize = 1 << 1;
@@ -209,20 +172,6 @@ pub const DefaultRwLock = struct {
const READER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, READER);
const Count = std.meta.Int(.unsigned, @divFloor(std.meta.bitCount(usize) - 1, 2));
- pub fn init(rwl: *DefaultRwLock) void {
- rwl.* = .{
- .state = 0,
- .mutex = Mutex.init(),
- .semaphore = Semaphore.init(0),
- };
- }
-
- pub fn deinit(rwl: *DefaultRwLock) void {
- rwl.semaphore.deinit();
- rwl.mutex.deinit();
- rwl.* = undefined;
- }
-
pub fn tryLock(rwl: *DefaultRwLock) bool {
if (rwl.mutex.tryLock()) {
const state = @atomicLoad(usize, &rwl.state, .SeqCst);
From 59418d1bf6f82866a1c8f3b35fd815bb7add5129 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 22:09:41 -0700
Subject: [PATCH 0102/2031] Sema: fix Value.intFitsInType for comptime int
---
src/value.zig | 34 +++++++++++++++++++++-------------
test/behavior/slice.zig | 2 --
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/src/value.zig b/src/value.zig
index acc3fa3d74..9e1f4c0ed6 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1331,12 +1331,16 @@ pub const Value = extern union {
.one,
.bool_true,
- => {
- const info = ty.intInfo(target);
- return switch (info.signedness) {
- .signed => info.bits >= 2,
- .unsigned => info.bits >= 1,
- };
+ => switch (ty.zigTypeTag()) {
+ .Int => {
+ const info = ty.intInfo(target);
+ return switch (info.signedness) {
+ .signed => info.bits >= 2,
+ .unsigned => info.bits >= 1,
+ };
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
},
.int_u64 => switch (ty.zigTypeTag()) {
@@ -1390,13 +1394,17 @@ pub const Value = extern union {
.decl_ref,
.function,
.variable,
- => {
- const info = ty.intInfo(target);
- const ptr_bits = target.cpu.arch.ptrBitWidth();
- return switch (info.signedness) {
- .signed => info.bits > ptr_bits,
- .unsigned => info.bits >= ptr_bits,
- };
+ => switch (ty.zigTypeTag()) {
+ .Int => {
+ const info = ty.intInfo(target);
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ return switch (info.signedness) {
+ .signed => info.bits > ptr_bits,
+ .unsigned => info.bits >= ptr_bits,
+ };
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
},
else => unreachable,
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 4ec5f11817..902ba49a6f 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -245,8 +245,6 @@ test "C pointer slice access" {
}
test "comptime slices are disambiguated" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
try expect(sliceSum(&[_]u8{ 1, 2 }) == 3);
try expect(sliceSum(&[_]u8{ 3, 4 }) == 7);
}
From 1678825c1450b29a1016bba62511388b3e539cd8 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 22:51:46 -0700
Subject: [PATCH 0103/2031] Sema: fix `@ptrCast` from slices
Also, fix allocations in comptime contexts with alignments.
---
src/Sema.zig | 31 +++++++++++++++++++++----------
test/behavior/slice.zig | 2 --
2 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 69b9adc54b..b202cb696f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2318,7 +2318,7 @@ fn zirAllocExtended(
else
Type.initTag(.inferred_alloc_mut);
- if (small.is_comptime) {
+ if (block.is_comptime or small.is_comptime) {
if (small.has_type) {
return sema.analyzeComptimeAlloc(block, var_ty, alignment, ty_src);
} else {
@@ -2379,7 +2379,10 @@ fn zirAllocInferredComptime(
sema.src = src;
return sema.addConstant(
inferred_alloc_ty,
- try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
+ try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
+ .decl = undefined,
+ .alignment = 0,
+ }),
);
}
@@ -2440,7 +2443,10 @@ fn zirAllocInferred(
if (block.is_comptime) {
return sema.addConstant(
inferred_alloc_ty,
- try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
+ try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
+ .decl = undefined,
+ .alignment = 0,
+ }),
);
}
@@ -11341,7 +11347,14 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_ty = sema.typeOf(operand);
try sema.checkPtrType(block, dest_ty_src, dest_ty);
try sema.checkPtrOperand(block, operand_src, operand_ty);
- return sema.coerceCompatiblePtrs(block, dest_ty, operand, operand_src);
+ if (dest_ty.isSlice()) {
+ return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{});
+ }
+ const ptr = if (operand_ty.isSlice())
+ try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty)
+ else
+ operand;
+ return sema.coerceCompatiblePtrs(block, dest_ty, ptr, operand_src);
}
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -13081,7 +13094,7 @@ fn fieldVal(
try sema.analyzeLoad(block, src, object, object_src)
else
object;
- return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src);
+ return sema.analyzeSlicePtr(block, object_src, slice, inner_ty);
} else if (mem.eql(u8, field_name, "len")) {
const slice = if (is_pointer_to)
try sema.analyzeLoad(block, src, object, object_src)
@@ -15584,19 +15597,17 @@ fn analyzeLoad(
fn analyzeSlicePtr(
sema: *Sema,
block: *Block,
- src: LazySrcLoc,
+ slice_src: LazySrcLoc,
slice: Air.Inst.Ref,
slice_ty: Type,
- slice_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
const result_ty = slice_ty.slicePtrFieldType(buf);
-
if (try sema.resolveMaybeUndefVal(block, slice_src, slice)) |val| {
if (val.isUndef()) return sema.addConstUndef(result_ty);
return sema.addConstant(result_ty, val.slicePtr());
}
- try sema.requireRuntimeBlock(block, src);
+ try sema.requireRuntimeBlock(block, slice_src);
return block.addTyOp(.slice_ptr, result_ty, slice);
}
@@ -15729,7 +15740,7 @@ fn analyzeSlice(
}
const ptr = if (slice_ty.isSlice())
- try sema.analyzeSlicePtr(block, src, ptr_or_slice, slice_ty, ptr_src)
+ try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty)
else
ptr_or_slice;
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 902ba49a6f..64bd972ead 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -312,8 +312,6 @@ test "empty array to slice" {
}
test "@ptrCast slice to pointer" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
const S = struct {
fn doTheTest() !void {
var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff };
From f4fa32a63219917e8fb26f43cbd2d97b17e0aeee Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 8 Feb 2022 23:02:13 -0700
Subject: [PATCH 0104/2031] Sema: fix `@typeInfo` for pointers returning 0
alignment
---
src/Sema.zig | 7 ++++++-
src/type.zig | 2 +-
test/behavior/type_info.zig | 10 ----------
3 files changed, 7 insertions(+), 12 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index b202cb696f..df5013fbaf 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -9651,6 +9651,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.Pointer => {
const info = ty.ptrInfo().data;
+ const alignment = if (info.@"align" != 0)
+ info.@"align"
+ else
+ info.pointee_type.abiAlignment(target);
+
const field_values = try sema.arena.alloc(Value, 8);
// size: Size,
field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size));
@@ -9659,7 +9664,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_volatile: bool,
field_values[2] = if (info.@"volatile") Value.@"true" else Value.@"false";
// alignment: comptime_int,
- field_values[3] = try Value.Tag.int_u64.create(sema.arena, info.@"align");
+ field_values[3] = try Value.Tag.int_u64.create(sema.arena, alignment);
// address_space: AddressSpace
field_values[4] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace"));
// child: type,
diff --git a/src/type.zig b/src/type.zig
index e3a4b3d60a..769e48ccc5 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4687,7 +4687,7 @@ pub const Type = extern union {
pub const Data = struct {
pointee_type: Type,
sentinel: ?Value = null,
- /// If zero use pointee_type.AbiAlign()
+ /// If zero use pointee_type.abiAlignment()
@"align": u32 = 0,
/// See src/target.zig defaultAddressSpace function for how to obtain
/// an appropriate value for this field.
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index 14adc4dad5..9f90088ed9 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -71,8 +71,6 @@ fn testBasic() !void {
}
test "type info: pointer type info" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
try testPointer();
comptime try testPointer();
}
@@ -89,8 +87,6 @@ fn testPointer() !void {
}
test "type info: unknown length pointer type info" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
try testUnknownLenPtr();
comptime try testUnknownLenPtr();
}
@@ -125,8 +121,6 @@ fn testNullTerminatedPtr() !void {
}
test "type info: slice type info" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
try testSlice();
comptime try testSlice();
}
@@ -306,8 +300,6 @@ const TestStruct = packed struct {
};
test "type info: opaque info" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-
try testOpaque();
comptime try testOpaque();
}
@@ -417,8 +409,6 @@ test "type info: TypeId -> TypeInfo impl cast" {
}
test "sentinel of opaque pointer type" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
const c_void_info = @typeInfo(*anyopaque);
try expect(c_void_info.Pointer.sentinel == null);
}
From 97019bc56d27349e0aeb44faa9d3f738887abe7f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 00:10:53 -0700
Subject: [PATCH 0105/2031] Sema: handle inferred error set tail call
When Sema sees a store_node instruction, it now checks for
the possibility of this pattern:
%a = ret_ptr
%b = store(%a, %c)
Where %c is an error union. In such case we need to add to the
current function's inferred error set, if any.
Coercion from error union to error union will be handled ideally if the
operand is comptime known. In such case it does the appropriate
unwrapping, then wraps again.
In the future, coercion from error union to error union should do the
same thing for a runtime value; emitting a runtime branch to check if
the value is an error or not.
`Value.arrayLen` for structs returns the number of fields. This is so
that Liveness can use it for the `vector_init` instruction (soon to be
renamed to `aggregate_init`).
---
src/Air.zig | 3 +-
src/Module.zig | 4 +--
src/Sema.zig | 85 +++++++++++++++++++++++++++++++++++++++-----------
src/type.zig | 3 +-
4 files changed, 73 insertions(+), 22 deletions(-)
diff --git a/src/Air.zig b/src/Air.zig
index 6888f51963..a044dd6294 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -521,7 +521,8 @@ pub const Inst = struct {
/// Some of the elements may be comptime-known.
/// Uses the `ty_pl` field, payload is index of an array of elements, each of which
/// is a `Ref`. Length of the array is given by the vector type.
- /// TODO rename this to `array_init` and make it support array values too.
+ /// TODO rename this to `aggregate_init` and make it support array values and
+ /// struct values too.
vector_init,
/// Communicates an intent to load memory.
diff --git a/src/Module.zig b/src/Module.zig
index bc806cfb9c..3631e41f25 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3547,7 +3547,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.code = file.zir,
.owner_decl = new_decl,
.func = null,
- .fn_ret_ty = Type.initTag(.void),
+ .fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
@@ -3628,7 +3628,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
.code = zir,
.owner_decl = decl,
.func = null,
- .fn_ret_ty = Type.initTag(.void),
+ .fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
diff --git a/src/Sema.zig b/src/Sema.zig
index df5013fbaf..38adfb4798 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3187,12 +3187,32 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
const tracy = trace(@src());
defer tracy.end();
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const zir_tags = sema.code.instructions.items(.tag);
+ const zir_datas = sema.code.instructions.items(.data);
+ const inst_data = zir_datas[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ptr = sema.resolveInst(extra.lhs);
- const value = sema.resolveInst(extra.rhs);
- return sema.storePtr(block, src, ptr, value);
+ const operand = sema.resolveInst(extra.rhs);
+
+ // Check for the possibility of this pattern:
+ // %a = ret_ptr
+ // %b = store(%a, %c)
+ // Where %c is an error union. In such case we need to add to the current function's
+ // inferred error set, if any.
+ if (sema.typeOf(operand).zigTypeTag() == .ErrorUnion and
+ sema.fn_ret_ty.zigTypeTag() == .ErrorUnion)
+ {
+ if (Zir.refToIndex(extra.lhs)) |ptr_index| {
+ if (zir_tags[ptr_index] == .extended and
+ zir_datas[ptr_index].extended.opcode == .ret_ptr)
+ {
+ try sema.addToInferredErrorSet(operand);
+ }
+ }
+ }
+
+ return sema.storePtr(block, src, ptr, operand);
}
fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -10400,6 +10420,23 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
return always_noreturn;
}
+fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
+ assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion);
+
+ if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
+ const op_ty = sema.typeOf(uncasted_operand);
+ switch (op_ty.zigTypeTag()) {
+ .ErrorSet => {
+ try payload.data.addErrorSet(sema.gpa, op_ty);
+ },
+ .ErrorUnion => {
+ try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet());
+ },
+ else => {},
+ }
+ }
+}
+
fn analyzeRet(
sema: *Sema,
block: *Block,
@@ -10410,18 +10447,7 @@ fn analyzeRet(
// add the error tag to the inferred error set of the in-scope function, so
// that the coercion below works correctly.
if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
- if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
- const op_ty = sema.typeOf(uncasted_operand);
- switch (op_ty.zigTypeTag()) {
- .ErrorSet => {
- try payload.data.addErrorSet(sema.gpa, op_ty);
- },
- .ErrorUnion => {
- try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet());
- },
- else => {},
- }
- }
+ try sema.addToInferredErrorSet(uncasted_operand);
}
const operand = try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src);
@@ -14355,9 +14381,32 @@ fn coerce(
},
else => {},
},
- .ErrorUnion => {
- // T to E!T or E to E!T
- return sema.wrapErrorUnion(block, dest_ty, inst, inst_src);
+ .ErrorUnion => switch (inst_ty.zigTypeTag()) {
+ .ErrorUnion => {
+ if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| {
+ switch (inst_val.tag()) {
+ .undef => return sema.addConstUndef(dest_ty),
+ .eu_payload => {
+ const payload = try sema.addConstant(
+ inst_ty.errorUnionPayload(),
+ inst_val.castTag(.eu_payload).?.data,
+ );
+ return sema.wrapErrorUnion(block, dest_ty, payload, inst_src);
+ },
+ else => {
+ const error_set = try sema.addConstant(
+ inst_ty.errorUnionSet(),
+ inst_val,
+ );
+ return sema.wrapErrorUnion(block, dest_ty, error_set, inst_src);
+ },
+ }
+ }
+ },
+ else => {
+ // T to E!T or E to E!T
+ return sema.wrapErrorUnion(block, dest_ty, inst, inst_src);
+ },
},
.Union => switch (inst_ty.zigTypeTag()) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
diff --git a/src/type.zig b/src/type.zig
index 769e48ccc5..0827b2e2d7 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -3013,7 +3013,7 @@ pub const Type = extern union {
}
}
- /// Asserts the type is an array or vector.
+ /// Asserts the type is an array or vector or struct.
pub fn arrayLen(ty: Type) u64 {
return switch (ty.tag()) {
.vector => ty.castTag(.vector).?.data.len,
@@ -3022,6 +3022,7 @@ pub const Type = extern union {
.array_u8 => ty.castTag(.array_u8).?.data,
.array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data,
.tuple => ty.castTag(.tuple).?.data.types.len,
+ .@"struct" => ty.castTag(.@"struct").?.data.fields.count(),
else => unreachable,
};
From e5ce87f1b198bfcb022e9ea91f2a9a58b1b75026 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 8 Feb 2022 20:33:45 +0100
Subject: [PATCH 0106/2031] stage2: handle decl ref to void types
Fixes behavior test 1914
---
src/codegen.zig | 17 ++++++-----------
test/behavior/bugs/1914.zig | 4 ----
2 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/src/codegen.zig b/src/codegen.zig
index 5873fd439c..d1c249d99d 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -487,19 +487,14 @@ fn lowerDeclRef(
return Result{ .appended = {} };
}
+ const target = bin_file.options.target;
+ const ptr_width = target.cpu.arch.ptrBitWidth();
const is_fn_body = decl.ty.zigTypeTag() == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
- return Result{
- .fail = try ErrorMsg.create(
- bin_file.allocator,
- src_loc,
- "TODO handle void types when lowering decl ref",
- .{},
- ),
- };
+ try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8));
+ return Result{ .appended = {} };
}
- if (decl.analysis != .complete) return error.AnalysisFail;
decl.markAlive();
const vaddr = vaddr: {
if (bin_file.cast(link.File.MachO)) |macho_file| {
@@ -510,8 +505,8 @@ fn lowerDeclRef(
break :vaddr bin_file.getDeclVAddr(decl);
};
- const endian = bin_file.options.target.cpu.arch.endian();
- switch (bin_file.options.target.cpu.arch.ptrBitWidth()) {
+ const endian = target.cpu.arch.endian();
+ switch (ptr_width) {
16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian),
32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, vaddr), endian),
64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
diff --git a/test/behavior/bugs/1914.zig b/test/behavior/bugs/1914.zig
index 6462937351..4ac2b929a2 100644
--- a/test/behavior/bugs/1914.zig
+++ b/test/behavior/bugs/1914.zig
@@ -13,8 +13,6 @@ const a = A{ .b_list_pointer = &b_list };
test "segfault bug" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const assert = std.debug.assert;
const obj = B{ .a_pointer = &a };
assert(obj.a_pointer == &a); // this makes zig crash
@@ -31,8 +29,6 @@ pub const B2 = struct {
var b_value = B2{ .pointer_array = &[_]*A2{} };
test "basic stuff" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
std.debug.assert(&b_value == &b_value);
}
From b28e9e42e07ae553fc35fb47c1ace619405c2b5c Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 8 Feb 2022 23:28:39 +0100
Subject: [PATCH 0107/2031] stage2: resolve struct type when lowering
struct_field_*
---
src/Sema.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/Sema.zig b/src/Sema.zig
index 38adfb4798..4ff535d86e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -13647,6 +13647,7 @@ fn structFieldPtr(
assert(unresolved_struct_ty.zigTypeTag() == .Struct);
const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty);
+ try sema.resolveStructLayout(block, src, struct_ty);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field_index_big = struct_obj.fields.getIndex(field_name) orelse
From e588e3873cdb4a7f8e03085d83453f9f3d5e36a7 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 8 Feb 2022 23:30:23 +0100
Subject: [PATCH 0108/2031] stage2: export trunc, truncf and truncl
---
lib/std/special/c.zig | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig
index bc6d03bffd..a4aa4f66b2 100644
--- a/lib/std/special/c.zig
+++ b/lib/std/special/c.zig
@@ -6,7 +6,9 @@
const std = @import("std");
const builtin = @import("builtin");
+const math = std.math;
const native_os = builtin.os.tag;
+const long_double_is_f128 = builtin.target.longDoubleIsF128();
comptime {
// When the self-hosted compiler is further along, all the logic from c_stage1.zig will
@@ -15,6 +17,9 @@ comptime {
if (builtin.zig_backend != .stage1) {
@export(memset, .{ .name = "memset", .linkage = .Strong });
@export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
+ @export(trunc, .{ .name = "trunc", .linkage = .Strong });
+ @export(truncf, .{ .name = "truncf", .linkage = .Strong });
+ @export(truncl, .{ .name = "truncl", .linkage = .Strong });
} else {
_ = @import("c_stage1.zig");
}
@@ -74,3 +79,18 @@ fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(
return dest;
}
+
+fn trunc(a: f64) f64 {
+ return math.trunc(a);
+}
+
+fn truncf(a: f32) f32 {
+ return math.trunc(a);
+}
+
+fn truncl(a: c_longdouble) c_longdouble {
+ if (!long_double_is_f128) {
+ @panic("TODO implement this");
+ }
+ return math.trunc(a);
+}
From ec3e638b97c638a6d292902b18a6a685854d60b4 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 9 Feb 2022 13:22:50 +0100
Subject: [PATCH 0109/2031] elf: fix unaligned file offset of moved phdr
containing GOT section
---
src/link/Elf.zig | 54 ++++++++++++++++++++++++++++++++++--------------
1 file changed, 39 insertions(+), 15 deletions(-)
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 2a756b3347..9ab84de1ce 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2,6 +2,7 @@ const Elf = @This();
const std = @import("std");
const builtin = @import("builtin");
+const math = std.math;
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
@@ -64,6 +65,7 @@ phdr_load_rw_index: ?u16 = null,
phdr_shdr_table: std.AutoHashMapUnmanaged(u16, u16) = .{},
entry_addr: ?u64 = null,
+page_size: u16,
debug_strtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
@@ -334,6 +336,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
};
const self = try gpa.create(Elf);
errdefer gpa.destroy(self);
+ const page_size: u16 = 0x1000; // TODO ppc64le requires 64KB
self.* = .{
.base = .{
@@ -343,6 +346,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
.file = null,
},
.ptr_width = ptr_width,
+ .page_size = page_size,
};
const use_llvm = build_options.have_llvm and options.use_llvm;
const use_stage1 = build_options.is_stage1 and options.use_stage1;
@@ -523,10 +527,11 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p64 => false,
};
const ptr_size: u8 = self.ptrWidthBytes();
+
if (self.phdr_load_re_index == null) {
self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
const file_size = self.base.options.program_code_size_hint;
- const p_align = 0x1000;
+ const p_align = self.page_size;
const off = self.findFreeSpace(file_size, p_align);
log.debug("found PT_LOAD RE free space 0x{x} to 0x{x}", .{ off, off + file_size });
const entry_addr: u64 = self.entry_addr orelse if (self.base.options.target.cpu.arch == .spu_2) @as(u64, 0) else default_entry_addr;
@@ -544,12 +549,13 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.entry_addr = null;
self.phdr_table_dirty = true;
}
+
if (self.phdr_got_index == null) {
self.phdr_got_index = @intCast(u16, self.program_headers.items.len);
const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
// We really only need ptr alignment but since we are using PROGBITS, linux requires
// page align.
- const p_align = if (self.base.options.target.os.tag == .linux) 0x1000 else @as(u16, ptr_size);
+ const p_align = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
const off = self.findFreeSpace(file_size, p_align);
log.debug("found PT_LOAD GOT free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
@@ -568,16 +574,17 @@ pub fn populateMissingMetadata(self: *Elf) !void {
});
self.phdr_table_dirty = true;
}
+
if (self.phdr_load_ro_index == null) {
self.phdr_load_ro_index = @intCast(u16, self.program_headers.items.len);
// TODO Find a hint about how much data need to be in rodata ?
const file_size = 1024;
// Same reason as for GOT
- const p_align = if (self.base.options.target.os.tag == .linux) 0x1000 else @as(u16, ptr_size);
+ const p_align = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
const off = self.findFreeSpace(file_size, p_align);
- log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
- const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x5000000 else 0xa000;
+ const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
try self.program_headers.append(self.base.allocator, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
@@ -591,16 +598,17 @@ pub fn populateMissingMetadata(self: *Elf) !void {
try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_ro_index.?, .{});
self.phdr_table_dirty = true;
}
+
if (self.phdr_load_rw_index == null) {
self.phdr_load_rw_index = @intCast(u16, self.program_headers.items.len);
// TODO Find a hint about how much data need to be in data ?
const file_size = 1024;
// Same reason as for GOT
- const p_align = if (self.base.options.target.os.tag == .linux) 0x1000 else @as(u16, ptr_size);
+ const p_align = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
const off = self.findFreeSpace(file_size, p_align);
- log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
- const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x6000000 else 0xc000;
+ const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
try self.program_headers.append(self.base.allocator, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
@@ -614,6 +622,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_rw_index.?, .{});
self.phdr_table_dirty = true;
}
+
if (self.shstrtab_index == null) {
self.shstrtab_index = @intCast(u16, self.sections.items.len);
assert(self.shstrtab.items.len == 0);
@@ -635,6 +644,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shstrtab_dirty = true;
self.shdr_table_dirty = true;
}
+
if (self.text_section_index == null) {
self.text_section_index = @intCast(u16, self.sections.items.len);
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
@@ -648,7 +658,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.sh_size = phdr.p_filesz,
.sh_link = 0,
.sh_info = 0,
- .sh_addralign = phdr.p_align,
+ .sh_addralign = 1,
.sh_entsize = 0,
});
try self.phdr_shdr_table.putNoClobber(
@@ -658,6 +668,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
);
self.shdr_table_dirty = true;
}
+
if (self.got_section_index == null) {
self.got_section_index = @intCast(u16, self.sections.items.len);
const phdr = &self.program_headers.items[self.phdr_got_index.?];
@@ -671,7 +682,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.sh_size = phdr.p_filesz,
.sh_link = 0,
.sh_info = 0,
- .sh_addralign = phdr.p_align,
+ .sh_addralign = @as(u16, ptr_size),
.sh_entsize = 0,
});
try self.phdr_shdr_table.putNoClobber(
@@ -681,6 +692,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
);
self.shdr_table_dirty = true;
}
+
if (self.rodata_section_index == null) {
self.rodata_section_index = @intCast(u16, self.sections.items.len);
const phdr = &self.program_headers.items[self.phdr_load_ro_index.?];
@@ -694,7 +706,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.sh_size = phdr.p_filesz,
.sh_link = 0,
.sh_info = 0,
- .sh_addralign = phdr.p_align,
+ .sh_addralign = 1,
.sh_entsize = 0,
});
try self.phdr_shdr_table.putNoClobber(
@@ -704,6 +716,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
);
self.shdr_table_dirty = true;
}
+
if (self.data_section_index == null) {
self.data_section_index = @intCast(u16, self.sections.items.len);
const phdr = &self.program_headers.items[self.phdr_load_rw_index.?];
@@ -717,7 +730,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.sh_size = phdr.p_filesz,
.sh_link = 0,
.sh_info = 0,
- .sh_addralign = phdr.p_align,
+ .sh_addralign = @as(u16, ptr_size),
.sh_entsize = 0,
});
try self.phdr_shdr_table.putNoClobber(
@@ -727,6 +740,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
);
self.shdr_table_dirty = true;
}
+
if (self.symtab_section_index == null) {
self.symtab_section_index = @intCast(u16, self.sections.items.len);
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
@@ -751,6 +765,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shdr_table_dirty = true;
try self.writeSymbol(0);
}
+
if (self.debug_str_section_index == null) {
self.debug_str_section_index = @intCast(u16, self.sections.items.len);
assert(self.debug_strtab.items.len == 0);
@@ -769,6 +784,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.debug_strtab_dirty = true;
self.shdr_table_dirty = true;
}
+
if (self.debug_info_section_index == null) {
self.debug_info_section_index = @intCast(u16, self.sections.items.len);
@@ -794,6 +810,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shdr_table_dirty = true;
self.debug_info_header_dirty = true;
}
+
if (self.debug_abbrev_section_index == null) {
self.debug_abbrev_section_index = @intCast(u16, self.sections.items.len);
@@ -819,6 +836,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shdr_table_dirty = true;
self.debug_abbrev_section_dirty = true;
}
+
if (self.debug_aranges_section_index == null) {
self.debug_aranges_section_index = @intCast(u16, self.sections.items.len);
@@ -844,6 +862,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shdr_table_dirty = true;
self.debug_aranges_section_dirty = true;
}
+
if (self.debug_line_section_index == null) {
self.debug_line_section_index = @intCast(u16, self.sections.items.len);
@@ -869,6 +888,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shdr_table_dirty = true;
self.debug_line_header_dirty = true;
}
+
const shsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
.p64 => @sizeOf(elf.Elf64_Shdr),
@@ -881,6 +901,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
self.shdr_table_dirty = true;
}
+
const phsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Phdr),
.p64 => @sizeOf(elf.Elf64_Phdr),
@@ -893,6 +914,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.phdr_table_offset = self.findFreeSpace(self.program_headers.items.len * phsize, phalign);
self.phdr_table_dirty = true;
}
+
{
// Iterate over symbols, populating free_list and last_text_block.
if (self.local_symbols.items.len != 1) {
@@ -2378,12 +2400,13 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const text_capacity = self.allocatedSize(shdr.sh_offset);
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
if (needed_size > text_capacity) {
- // Must move the entire text section.
- const new_offset = self.findFreeSpace(needed_size, 0x1000);
+ // Must move the entire section.
+ const new_offset = self.findFreeSpace(needed_size, self.page_size);
const text_size = if (self.atoms.get(phdr_index)) |last| blk: {
const sym = self.local_symbols.items[last.local_sym_index];
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else 0;
+ log.debug("new PT_LOAD file offset 0x{x} to 0x{x}", .{ new_offset, new_offset + text_size });
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, text_size);
if (amt != text_size) return error.InputOutput;
shdr.sh_offset = new_offset;
@@ -2407,6 +2430,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
self.shdr_table_dirty = true; // TODO look into making only the one section dirty
}
+ shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
// This function can also reallocate a text block.
// In this case we need to "unplug" it from its previous location before
@@ -3478,7 +3502,7 @@ fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
const needed_size = self.offset_table.items.len * entry_size;
if (needed_size > allocated_size) {
// Must move the entire got section.
- const new_offset = self.findFreeSpace(needed_size, entry_size);
+ const new_offset = self.findFreeSpace(needed_size, self.page_size);
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, shdr.sh_size);
if (amt != shdr.sh_size) return error.InputOutput;
shdr.sh_offset = new_offset;
From 92cb17a331b0f2f3a89b8e5e6995e8e36b9f3679 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 10:40:59 -0700
Subject: [PATCH 0110/2031] CI: windows: update env var names
---
ci/azure/pipelines.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ci/azure/pipelines.yml b/ci/azure/pipelines.yml
index f0df4558c2..b1b99288e8 100644
--- a/ci/azure/pipelines.yml
+++ b/ci/azure/pipelines.yml
@@ -91,7 +91,7 @@ jobs:
- pwsh: |
Set-Variable -Name ZIGBUILDDIR -Value "$(Get-Location)\build"
- $Env:AWS_SHARED_CREDENTIALS_FILE = "$(aws_credentials.secureFilePath)"
+ $Env:AWS_SHARED_CREDENTIALS_FILE = "$Env:DOWNLOADSECUREFILE_SECUREFILEPATH"
cd "$ZIGBUILDDIR"
mv ../LICENSE dist/
@@ -118,7 +118,7 @@ jobs:
Set-Variable -Name SHASUM -Value (Get-FileHash "$TARBALL" -Algorithm SHA256 | select-object -ExpandProperty Hash)
Set-Variable -Name BYTESIZE -Value (Get-Item "$TARBALL").length
- Set-Variable -Name JSONFILE -Value "windows-${GITBRANCH}.json"
+ Set-Variable -Name JSONFILE -Value "windows-${Env:BUILD_SOURCEBRANCHNAME}.json"
echo $null > $JSONFILE
echo ('{"tarball": "' + $TARBALL + '",') >> $JSONFILE
echo ('"shasum": "' + $SHASUM + '",') >> $JSONFILE
From 2836cd5fbdcbb22b1e03c01005e0e09777c5475f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 11:36:30 -0700
Subject: [PATCH 0111/2031] CLI: ignore -lgcc_s when it is redundant with
compiler-rt
For some projects, they can't help themselves, -lgcc_s ends up on the
compiler command line even though it does not belong there. In Zig we
know what -lgcc_s does. It's an alternative to compiler-rt. With this
commit we emit a warning telling that it is unnecessary to put such
thing on the command line, and happily ignore it, since we will fulfill
the dependency with compiler-rt.
---
src/main.zig | 5 +++++
src/target.zig | 10 ++++++++++
2 files changed, 15 insertions(+)
diff --git a/src/main.zig b/src/main.zig
index 3f38fd1f78..75655d6a2a 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -2010,6 +2010,11 @@ fn buildOutputType(
_ = system_libs.orderedRemove(lib_name);
continue;
}
+ if (target_util.is_compiler_rt_lib_name(target_info.target, lib_name)) {
+ std.log.warn("ignoring superfluous library '{s}': this dependency is fulfilled instead by compiler-rt which zig unconditionally provides", .{lib_name});
+ _ = system_libs.orderedRemove(lib_name);
+ continue;
+ }
if (std.fs.path.isAbsolute(lib_name)) {
fatal("cannot use absolute path as a system library: {s}", .{lib_name});
}
diff --git a/src/target.zig b/src/target.zig
index 2c21fb5c61..63bd1db0b5 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -427,6 +427,16 @@ pub fn is_libcpp_lib_name(target: std.Target, name: []const u8) bool {
eqlIgnoreCase(ignore_case, name, "c++abi");
}
+pub fn is_compiler_rt_lib_name(target: std.Target, name: []const u8) bool {
+ if (target.abi.isGnu() and std.mem.eql(u8, name, "gcc_s")) {
+ return true;
+ }
+ if (std.mem.eql(u8, name, "compiler_rt")) {
+ return true;
+ }
+ return false;
+}
+
pub fn hasDebugInfo(target: std.Target) bool {
return !target.cpu.arch.isWasm();
}
From 274b9d5c1d01c9549f2f12e76654a850df94a057 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 16:42:15 -0700
Subject: [PATCH 0112/2031] ci: work around azure networking issue
---
ci/azure/pipelines.yml | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/ci/azure/pipelines.yml b/ci/azure/pipelines.yml
index b1b99288e8..6bb54e0402 100644
--- a/ci/azure/pipelines.yml
+++ b/ci/azure/pipelines.yml
@@ -93,6 +93,11 @@ jobs:
Set-Variable -Name ZIGBUILDDIR -Value "$(Get-Location)\build"
$Env:AWS_SHARED_CREDENTIALS_FILE = "$Env:DOWNLOADSECUREFILE_SECUREFILEPATH"
+ # Workaround Azure networking issue
+ # https://github.com/aws/aws-cli/issues/5749
+ $Env:AWS_EC2_METADATA_DISABLED = "true"
+ $Env:AWS_REGION = "us-west-2"
+
cd "$ZIGBUILDDIR"
mv ../LICENSE dist/
mv ../zig-cache/langref.html dist/
From 44b5fdf3266f11607313bc9990a876b5a7f9e174 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 18:26:56 -0700
Subject: [PATCH 0113/2031] Revert "ci: azure: split build-and-test step"
This reverts commit 846eb701821a3f2af514bbad770478e3276b2d89.
This did not properly translate the upload portion of the CI script to
powershell which broke our CI pipeline.
---
CMakeLists.txt | 2 +-
ci/azure/pipelines.yml | 137 ++++---------------------------
ci/azure/windows_msvc_install | 16 ++++
ci/azure/windows_msvc_script.bat | 39 +++++++++
ci/azure/windows_upload | 46 +++++++++++
5 files changed, 120 insertions(+), 120 deletions(-)
create mode 100644 ci/azure/windows_msvc_install
create mode 100644 ci/azure/windows_msvc_script.bat
create mode 100755 ci/azure/windows_upload
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 533e03383f..8fd1960518 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -63,7 +63,7 @@ if("${ZIG_VERSION}" STREQUAL "")
endif()
endif()
endif()
-message(STATUS "Configuring zig version ${ZIG_VERSION}")
+message("Configuring zig version ${ZIG_VERSION}")
set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not compatible with glibc)")
set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries")
diff --git a/ci/azure/pipelines.yml b/ci/azure/pipelines.yml
index 6bb54e0402..4524ee9fb1 100644
--- a/ci/azure/pipelines.yml
+++ b/ci/azure/pipelines.yml
@@ -10,7 +10,6 @@ jobs:
- script: ci/azure/macos_script
name: main
displayName: 'Build and test'
-
- job: BuildMacOS_arm64
pool:
vmImage: 'macOS-10.15'
@@ -22,130 +21,30 @@ jobs:
- script: ci/azure/macos_arm64_script
name: main
displayName: 'Build'
-
- job: BuildWindows
- timeoutInMinutes: 360
pool:
vmImage: 'windows-2019'
- variables:
- LLVM_CLANG_LLD_URL: 'https://ziglang.org/deps/llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt.tar.xz'
- LLVM_CLANG_LLD_DIR: 'llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt'
+ timeoutInMinutes: 360
steps:
- - pwsh: |
- (New-Object Net.WebClient).DownloadFile("$(LLVM_CLANG_LLD_URL)", "${LLVM_CLANG_LLD_DIR}.tar.xz")
- & 'C:\Program Files\7-Zip\7z.exe' x "${LLVM_CLANG_LLD_DIR}.tar.xz"
- & 'C:\Program Files\7-Zip\7z.exe' x "${LLVM_CLANG_LLD_DIR}.tar"
- name: install
- displayName: 'Install LLVM/CLANG/LLD'
-
- - pwsh: |
- Set-Variable -Name ZIGBUILDDIR -Value "$(Get-Location)\build"
- Set-Variable -Name ZIGINSTALLDIR -Value "$ZIGBUILDDIR\dist"
- Set-Variable -Name ZIGPREFIXPATH -Value "$(Get-Location)\$(LLVM_CLANG_LLD_DIR)"
-
- # Make the `zig version` number consistent.
- # This will affect the cmake command below.
- git config core.abbrev 9
- git fetch --tags
-
- mkdir $ZIGBUILDDIR
- cd $ZIGBUILDDIR
- & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
- cmake .. `
- -Thost=x64 `
- -G "Visual Studio 16 2019" `
- -A x64 `
- -DCMAKE_INSTALL_PREFIX="$ZIGINSTALLDIR" `
- -DCMAKE_PREFIX_PATH="$ZIGPREFIXPATH" `
- -DCMAKE_BUILD_TYPE=Release `
- -DZIG_OMIT_STAGE2=ON 2> out-null
- & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\MSBuild\Current\Bin\MSBuild.exe" `
- /maxcpucount /p:Configuration=Release INSTALL.vcxproj
- name: build
- displayName: 'Build'
-
- - pwsh: |
- Set-Variable -Name ZIGINSTALLDIR -Value "$(Get-Location)\build\dist"
-
- # Sadly, stage2 is omitted from this build to save memory on the CI server. Once self-hosted is
- # built with itself and does not gobble as much memory, we can enable these tests.
- #& "$ZIGINSTALLDIR\bin\zig.exe" test "..\test\behavior.zig" -fno-stage1 -fLLVM -I "..\test" 2>&1
-
- & "$ZIGINSTALLDIR\bin\zig.exe" build test-toolchain -Dskip-non-native -Dskip-stage2-tests 2>&1
- & "$ZIGINSTALLDIR\bin\zig.exe" build test-std -Dskip-non-native 2>&1
- name: test
- displayName: 'Test'
-
- - pwsh: |
- Set-Variable -Name ZIGINSTALLDIR -Value "$(Get-Location)\build\dist"
-
- & "$ZIGINSTALLDIR\bin\zig.exe" build docs
- timeoutInMinutes: 60
- name: doc
- displayName: 'Documentation'
-
+ - powershell: |
+ (New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2022-01-28/msys2-base-x86_64-20220128.sfx.exe", "sfx.exe")
+ .\sfx.exe -y -o\
+ displayName: Download/Extract/Install MSYS2
+ - script: |
+ @REM install updated filesystem package first without dependency checking
+ @REM because of: https://github.com/msys2/MSYS2-packages/issues/2021
+ %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Sydd filesystem"
+ displayName: Workaround filesystem dash MSYS2 dependency issue
+ - script: |
+ %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu"
+ %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu"
+ displayName: Update MSYS2
- task: DownloadSecureFile@1
inputs:
- name: aws_credentials
- secureFile: aws_credentials
-
- - pwsh: |
- Set-Variable -Name ZIGBUILDDIR -Value "$(Get-Location)\build"
- $Env:AWS_SHARED_CREDENTIALS_FILE = "$Env:DOWNLOADSECUREFILE_SECUREFILEPATH"
-
- # Workaround Azure networking issue
- # https://github.com/aws/aws-cli/issues/5749
- $Env:AWS_EC2_METADATA_DISABLED = "true"
- $Env:AWS_REGION = "us-west-2"
-
- cd "$ZIGBUILDDIR"
- mv ../LICENSE dist/
- mv ../zig-cache/langref.html dist/
- mv dist/bin/zig.exe dist/
- rmdir dist/bin
-
- # Remove the unnecessary zig dir in $prefix/lib/zig/std/std.zig
- mv dist/lib/zig dist/lib2
- rmdir dist/lib
- mv dist/lib2 dist/lib
-
- Set-Variable -Name VERSION -Value $(./dist/zig.exe version)
- Set-Variable -Name DIRNAME -Value "zig-windows-x86_64-$VERSION"
- Set-Variable -Name TARBALL -Value "$DIRNAME.zip"
- mv dist "$DIRNAME"
- 7z a "$TARBALL" "$DIRNAME"
-
- aws s3 cp `
- "$TARBALL" `
- s3://ziglang.org/builds/ `
- --cache-control 'public, max-age=31536000, immutable'
-
- Set-Variable -Name SHASUM -Value (Get-FileHash "$TARBALL" -Algorithm SHA256 | select-object -ExpandProperty Hash)
- Set-Variable -Name BYTESIZE -Value (Get-Item "$TARBALL").length
-
- Set-Variable -Name JSONFILE -Value "windows-${Env:BUILD_SOURCEBRANCHNAME}.json"
- echo $null > $JSONFILE
- echo ('{"tarball": "' + $TARBALL + '",') >> $JSONFILE
- echo ('"shasum": "' + $SHASUM + '",') >> $JSONFILE
- echo ('"size": ' + $BYTESIZE + '}' ) >> $JSONFILE
-
- aws s3 cp `
- "$JSONFILE" `
- s3://ziglang.org/builds/ `
- --cache-control 'max-age=0, must-revalidate'
-
- aws s3 cp `
- "$JSONFILE" `
- "s3://ziglang.org/builds/x86_64-windows-${VERSION}.json"
-
- echo "##vso[task.setvariable variable=tarball;isOutput=true]$TARBALL"
- echo "##vso[task.setvariable variable=shasum;isOutput=true]$SHASUM"
- echo "##vso[task.setvariable variable=bytesize;isOutput=true]$BYTESIZE"
-
- name: upload
- condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))
- displayName: 'Upload'
-
+ secureFile: s3cfg
+ - script: ci/azure/windows_msvc_script.bat
+ name: main
+ displayName: 'Build and test'
- job: OnMasterSuccess
dependsOn:
- BuildMacOS
diff --git a/ci/azure/windows_msvc_install b/ci/azure/windows_msvc_install
new file mode 100644
index 0000000000..2df445fe12
--- /dev/null
+++ b/ci/azure/windows_msvc_install
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+set -x
+set -e
+
+pacman -Suy --needed --noconfirm
+pacman -S --needed --noconfirm wget p7zip python3-pip tar xz
+
+TARBALL="llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt.tar.xz"
+
+pip install s3cmd
+wget -nv "https://ziglang.org/deps/$TARBALL"
+# If the first extraction fails, re-try it once; this can happen if the tarball
+# contains symlinks that are in the table of contents before the files that
+# they point to.
+tar -xf $TARBALL || tar --overwrite -xf $TARBALL
diff --git a/ci/azure/windows_msvc_script.bat b/ci/azure/windows_msvc_script.bat
new file mode 100644
index 0000000000..c61c88093c
--- /dev/null
+++ b/ci/azure/windows_msvc_script.bat
@@ -0,0 +1,39 @@
+@echo on
+SET "SRCROOT=%cd%"
+SET "PREVPATH=%PATH%"
+SET "PREVMSYSTEM=%MSYSTEM%"
+
+set "PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem"
+SET "MSYSTEM=MINGW64"
+bash -lc "cd ${SRCROOT} && ci/azure/windows_msvc_install" || exit /b
+SET "PATH=%PREVPATH%"
+SET "MSYSTEM=%PREVMSYSTEM%"
+
+SET "ZIGBUILDDIR=%SRCROOT%\build"
+SET "ZIGINSTALLDIR=%ZIGBUILDDIR%\dist"
+SET "ZIGPREFIXPATH=%SRCROOT%\llvm+clang+lld-13.0.0-x86_64-windows-msvc-release-mt"
+
+call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
+
+REM Make the `zig version` number consistent.
+REM This will affect the cmake command below.
+git.exe config core.abbrev 9
+git.exe fetch --unshallow
+git.exe fetch --tags
+
+mkdir %ZIGBUILDDIR%
+cd %ZIGBUILDDIR%
+cmake.exe .. -Thost=x64 -G"Visual Studio 16 2019" -A x64 "-DCMAKE_INSTALL_PREFIX=%ZIGINSTALLDIR%" "-DCMAKE_PREFIX_PATH=%ZIGPREFIXPATH%" -DCMAKE_BUILD_TYPE=Release -DZIG_OMIT_STAGE2=ON || exit /b
+msbuild /maxcpucount /p:Configuration=Release INSTALL.vcxproj || exit /b
+
+REM Sadly, stage2 is omitted from this build to save memory on the CI server. Once self-hosted is
+REM built with itself and does not gobble as much memory, we can enable these tests.
+REM "%ZIGINSTALLDIR%\bin\zig.exe" test "..\test\behavior.zig" -fno-stage1 -fLLVM -I "..\test" || exit /b
+
+"%ZIGINSTALLDIR%\bin\zig.exe" build test-toolchain -Dskip-non-native -Dskip-stage2-tests || exit /b
+"%ZIGINSTALLDIR%\bin\zig.exe" build test-std -Dskip-non-native || exit /b
+"%ZIGINSTALLDIR%\bin\zig.exe" build docs || exit /b
+
+set "PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem"
+SET "MSYSTEM=MINGW64"
+bash -lc "cd ${SRCROOT} && ci/azure/windows_upload" || exit /b
diff --git a/ci/azure/windows_upload b/ci/azure/windows_upload
new file mode 100755
index 0000000000..9c5e07e5f9
--- /dev/null
+++ b/ci/azure/windows_upload
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+set -x
+set -e
+
+if [ "${BUILD_REASON}" != "PullRequest" ]; then
+ cd "$ZIGBUILDDIR"
+
+ mv ../LICENSE dist/
+ mv ../zig-cache/langref.html dist/
+ mv dist/bin/zig.exe dist/
+ rmdir dist/bin
+
+ # Remove the unnecessary zig dir in $prefix/lib/zig/std/std.zig
+ mv dist/lib/zig dist/lib2
+ rmdir dist/lib
+ mv dist/lib2 dist/lib
+
+ VERSION=$(dist/zig.exe version)
+ DIRNAME="zig-windows-x86_64-$VERSION"
+ TARBALL="$DIRNAME.zip"
+ mv dist "$DIRNAME"
+ 7z a "$TARBALL" "$DIRNAME"
+
+ # mv "$DOWNLOADSECUREFILE_SECUREFILEPATH" "$HOME/.s3cfg"
+ s3cmd -c "$DOWNLOADSECUREFILE_SECUREFILEPATH" put -P --add-header="cache-control: public, max-age=31536000, immutable" "$TARBALL" s3://ziglang.org/builds/
+
+ SHASUM=$(sha256sum $TARBALL | cut '-d ' -f1)
+ BYTESIZE=$(wc -c < $TARBALL)
+
+ JSONFILE="windows-$GITBRANCH.json"
+ touch $JSONFILE
+ echo "{\"tarball\": \"$TARBALL\"," >>$JSONFILE
+ echo "\"shasum\": \"$SHASUM\"," >>$JSONFILE
+ echo "\"size\": \"$BYTESIZE\"}" >>$JSONFILE
+
+ s3cmd -c "$DOWNLOADSECUREFILE_SECUREFILEPATH" put -P --add-header="Cache-Control: max-age=0, must-revalidate" "$JSONFILE" "s3://ziglang.org/builds/$JSONFILE"
+ s3cmd -c "$DOWNLOADSECUREFILE_SECUREFILEPATH" put -P "$JSONFILE" "s3://ziglang.org/builds/x86_64-windows-$VERSION.json"
+
+ # `set -x` causes these variables to be mangled.
+ # See https://developercommunity.visualstudio.com/content/problem/375679/pipeline-variable-incorrectly-inserts-single-quote.html
+ set +x
+ echo "##vso[task.setvariable variable=tarball;isOutput=true]$TARBALL"
+ echo "##vso[task.setvariable variable=shasum;isOutput=true]$SHASUM"
+ echo "##vso[task.setvariable variable=bytesize;isOutput=true]$BYTESIZE"
+fi
From 7f0cf395aa74eb5ea250bd28f7525b3036790a6a Mon Sep 17 00:00:00 2001
From: John Schmidt
Date: Fri, 4 Feb 2022 20:21:15 +0100
Subject: [PATCH 0114/2031] stage2: implement all builtin floatops for
f{16,32,64}
- Merge `floatop.zig` and `floatop_stage1.zig` since most tests now pass
on stage2.
- Add more behavior tests for a bunch of functions.
---
src/Air.zig | 53 +++-
src/Liveness.zig | 12 +
src/Sema.zig | 24 +-
src/arch/aarch64/CodeGen.zig | 15 +-
src/arch/arm/CodeGen.zig | 15 +-
src/arch/riscv64/CodeGen.zig | 15 +-
src/arch/wasm/CodeGen.zig | 12 +
src/arch/x86_64/CodeGen.zig | 15 +-
src/codegen/c.zig | 21 +-
src/codegen/llvm.zig | 18 +-
src/print_air.zig | 12 +
src/value.zig | 384 ++++++++++++++++++++++++++
test/behavior.zig | 1 -
test/behavior/floatop.zig | 362 ++++++++++++++++++++++++-
test/behavior/floatop_stage1.zig | 452 -------------------------------
15 files changed, 917 insertions(+), 494 deletions(-)
delete mode 100644 test/behavior/floatop_stage1.zig
diff --git a/src/Air.zig b/src/Air.zig
index a044dd6294..623da26255 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -237,9 +237,45 @@ pub const Inst = struct {
/// Uses the `ty_op` field.
popcount,
- /// Computes the square root of a floating point number.
+ /// Square root of a floating point number.
/// Uses the `un_op` field.
sqrt,
+ /// Sine of a floating point number.
+ /// Uses the `un_op` field.
+ sin,
+ /// Cosine a floating point number.
+ /// Uses the `un_op` field.
+ cos,
+ /// Base e exponential of a floating point number.
+ /// Uses the `un_op` field.
+ exp,
+ /// Base 2 exponential of a floating point number.
+ /// Uses the `un_op` field.
+ exp2,
+ /// Natural (base e) logarithm of a floating point number.
+ /// Uses the `un_op` field.
+ log,
+ /// Base 2 logarithm of a floating point number.
+ /// Uses the `un_op` field.
+ log2,
+ /// Base 10 logarithm of a floating point number.
+ /// Uses the `un_op` field.
+ log10,
+ /// Absolute value of a floating point number.
+ /// Uses the `un_op` field.
+ fabs,
+ /// Floor: rounds a floating point number down to the nearest integer.
+ /// Uses the `un_op` field.
+ floor,
+ /// Ceiling: rounds a floating point number up to the nearest integer.
+ /// Uses the `un_op` field.
+ ceil,
+ /// Rounds a floating point number to the nearest integer.
+ /// Uses the `un_op` field.
+ round,
+ /// Rounds a floating point number to the nearest integer towards zero.
+ /// Uses the `un_op` field.
+ trunc_float,
/// `<`. Result type is always bool.
/// Uses the `bin_op` field.
@@ -754,7 +790,20 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.max,
=> return air.typeOf(datas[inst].bin_op.lhs),
- .sqrt => return air.typeOf(datas[inst].un_op),
+ .sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ => return air.typeOf(datas[inst].un_op),
.cmp_lt,
.cmp_lte,
diff --git a/src/Liveness.zig b/src/Liveness.zig
index bed7de1507..12ba63fc00 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -339,6 +339,18 @@ fn analyzeInst(
.tag_name,
.error_name,
.sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
=> {
const operand = inst_datas[inst].un_op;
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
diff --git a/src/Sema.zig b/src/Sema.zig
index 4ff535d86e..4d38c6b7f7 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -747,18 +747,18 @@ fn analyzeBodyInner(
.ctz => try sema.zirClzCtz(block, inst, .ctz, Value.ctz),
.sqrt => try sema.zirUnaryMath(block, inst, .sqrt, Value.sqrt),
- .sin => @panic("TODO"),
- .cos => @panic("TODO"),
- .exp => @panic("TODO"),
- .exp2 => @panic("TODO"),
- .log => @panic("TODO"),
- .log2 => @panic("TODO"),
- .log10 => @panic("TODO"),
- .fabs => @panic("TODO"),
- .floor => @panic("TODO"),
- .ceil => @panic("TODO"),
- .trunc => @panic("TODO"),
- .round => @panic("TODO"),
+ .sin => try sema.zirUnaryMath(block, inst, .sin, Value.sin),
+ .cos => try sema.zirUnaryMath(block, inst, .cos, Value.cos),
+ .exp => try sema.zirUnaryMath(block, inst, .exp, Value.exp),
+ .exp2 => try sema.zirUnaryMath(block, inst, .exp2, Value.exp2),
+ .log => try sema.zirUnaryMath(block, inst, .log, Value.log),
+ .log2 => try sema.zirUnaryMath(block, inst, .log2, Value.log2),
+ .log10 => try sema.zirUnaryMath(block, inst, .log10, Value.log10),
+ .fabs => try sema.zirUnaryMath(block, inst, .fabs, Value.fabs),
+ .floor => try sema.zirUnaryMath(block, inst, .floor, Value.floor),
+ .ceil => try sema.zirUnaryMath(block, inst, .ceil, Value.ceil),
+ .round => try sema.zirUnaryMath(block, inst, .round, Value.round),
+ .trunc => try sema.zirUnaryMath(block, inst, .trunc_float, Value.trunc),
.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index d0413af02f..42f2c66df1 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -528,7 +528,20 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
- .sqrt => try self.airUnaryMath(inst),
+ .sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float
+ => try self.airUnaryMath(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 2116717cd1..711e2a96f0 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -520,7 +520,20 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
- .sqrt => try self.airUnaryMath(inst),
+ .sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ => try self.airUnaryMath(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index ce5dc39bf8..09ca92f229 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -507,7 +507,20 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
- .sqrt => try self.airUnaryMath(inst),
+ .sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ => try self.airUnaryMath(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 5186f0231b..b0c24be03b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1679,6 +1679,18 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.unwrap_errunion_err_ptr,
.sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
.ptr_slice_len_ptr,
.ptr_slice_ptr_ptr,
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index de8f907dea..d78245b094 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -599,7 +599,20 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
- .sqrt => try self.airUnaryMath(inst),
+ .sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ => try self.airUnaryMath(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index fc3848df21..5d7bdcee55 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1446,7 +1446,20 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.mul_sat => try airSatOp(f, inst, "muls_"),
.shl_sat => try airSatOp(f, inst, "shls_"),
- .sqrt => try airSqrt(f, inst),
+ .sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ => |tag| return f.fail("TODO: C backend: implement unary op for tag '{s}'", .{@tagName(tag)}),
.add_with_overflow => try airAddWithOverflow(f, inst),
.sub_with_overflow => try airSubWithOverflow(f, inst),
@@ -3395,12 +3408,6 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue {
return CValue.none;
}
-fn airSqrt(f: *Function, inst: Air.Inst.Index) !CValue {
- _ = f;
- _ = inst;
- return f.fail("TODO: C backend: implement sqrt", .{});
-}
-
fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
return switch (order) {
.Unordered => "memory_order_relaxed",
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5a2c21eece..9c9bf0d0f2 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2050,7 +2050,19 @@ pub const FuncGen = struct {
.shr => try self.airShr(inst, false),
.shr_exact => try self.airShr(inst, true),
- .sqrt => try self.airSqrt(inst),
+ .sqrt => try self.airUnaryOp(inst, "llvm.sqrt"),
+ .sin => try self.airUnaryOp(inst, "llvm.sin"),
+ .cos => try self.airUnaryOp(inst, "llvm.cos"),
+ .exp => try self.airUnaryOp(inst, "llvm.exp"),
+ .exp2 => try self.airUnaryOp(inst, "llvm.exp2"),
+ .log => try self.airUnaryOp(inst, "llvm.log"),
+ .log2 => try self.airUnaryOp(inst, "llvm.log2"),
+ .log10 => try self.airUnaryOp(inst, "llvm.log10"),
+ .fabs => try self.airUnaryOp(inst, "llvm.fabs"),
+ .floor => try self.airUnaryOp(inst, "llvm.floor"),
+ .ceil => try self.airUnaryOp(inst, "llvm.ceil"),
+ .round => try self.airUnaryOp(inst, "llvm.round"),
+ .trunc_float => try self.airUnaryOp(inst, "llvm.trunc"),
.cmp_eq => try self.airCmp(inst, .eq),
.cmp_gt => try self.airCmp(inst, .gt),
@@ -4213,7 +4225,7 @@ pub const FuncGen = struct {
}
}
- fn airSqrt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -4221,7 +4233,7 @@ pub const FuncGen = struct {
const operand_ty = self.air.typeOf(un_op);
const operand_llvm_ty = try self.dg.llvmType(operand_ty);
- const fn_val = self.getIntrinsic("llvm.sqrt", &.{operand_llvm_ty});
+ const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
const params = [_]*const llvm.Value{operand};
return self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, "");
diff --git a/src/print_air.zig b/src/print_air.zig
index 341e736b91..bcadf31e74 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -159,6 +159,18 @@ const Writer = struct {
.tag_name,
.error_name,
.sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
=> try w.writeUnOp(s, inst),
.breakpoint,
diff --git a/src/value.zig b/src/value.zig
index 9e1f4c0ed6..33a75e08bb 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -3308,6 +3308,390 @@ pub const Value = extern union {
}
}
+ pub fn sin(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @sin(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @sin(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @sin(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt sin for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @sin(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt sin for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @sin(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn cos(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @cos(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @cos(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @cos(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt cos for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @cos(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt cos for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @cos(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn exp(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @exp(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @exp(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @exp(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt exp for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @exp(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt exp for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @exp(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn exp2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @exp2(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @exp2(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @exp2(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt exp2 for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @exp2(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt exp2 for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @exp2(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn log(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @log(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @log(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @log(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt log for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @log(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt log for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @log(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn log2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @log2(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @log2(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @log2(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt log2 for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @log2(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt log2 for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @log2(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn log10(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @log10(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @log10(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @log10(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt log10 for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @log10(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt log10 for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @log10(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn fabs(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @fabs(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @fabs(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @fabs(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt fabs for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @fabs(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt fabs for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @fabs(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn floor(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @floor(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @floor(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @floor(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt floor for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @floor(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt floor for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @floor(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn ceil(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @ceil(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @ceil(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @ceil(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt ceil for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @ceil(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt ceil for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @ceil(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn round(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @round(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @round(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @round(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt round for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @round(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt round for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @round(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn trunc(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @trunc(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @trunc(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @trunc(f));
+ },
+ 80 => {
+ if (true) {
+ @panic("TODO implement compiler_rt trunc for f80");
+ }
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @trunc(f));
+ },
+ 128 => {
+ if (true) {
+ @panic("TODO implement compiler_rt trunc for f128");
+ }
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @trunc(f));
+ },
+ else => unreachable,
+ }
+ }
+
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,
diff --git a/test/behavior.zig b/test/behavior.zig
index c177dd8634..b2ffbabde2 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -150,7 +150,6 @@ test {
_ = @import("behavior/const_slice_child.zig");
_ = @import("behavior/export_self_referential_type_info.zig");
_ = @import("behavior/field_parent_ptr.zig");
- _ = @import("behavior/floatop_stage1.zig");
_ = @import("behavior/fn_delegation.zig");
_ = @import("behavior/ir_block_deps.zig");
_ = @import("behavior/misc.zig");
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index 7807c690f6..ed632c26c5 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -1,12 +1,22 @@
const std = @import("std");
+const builtin = @import("builtin");
const expect = std.testing.expect;
const math = std.math;
const pi = std.math.pi;
const e = std.math.e;
const Vector = std.meta.Vector;
+const has_f80_rt = @import("builtin").cpu.arch == .x86_64;
+const epsilon_16 = 0.001;
const epsilon = 0.000001;
+fn epsForType(comptime T: type) T {
+ return switch (T) {
+ f16 => @as(f16, epsilon_16),
+ else => @as(T, epsilon),
+ };
+}
+
test "floating point comparisons" {
try testFloatComparisons();
comptime try testFloatComparisons();
@@ -79,19 +89,37 @@ test "@sqrt" {
}
fn testSqrt() !void {
- {
- var a: f16 = 4;
- try expect(@sqrt(a) == 2);
- }
- {
- var a: f32 = 9;
- try expect(@sqrt(a) == 3);
- var b: f32 = 1.1;
- try expect(math.approxEqAbs(f32, @sqrt(b), 1.0488088481701516, epsilon));
- }
- {
- var a: f64 = 25;
- try expect(@sqrt(a) == 5);
+ try expect(@sqrt(@as(f16, 4)) == 2);
+ try expect(@sqrt(@as(f32, 9)) == 3);
+ try expect(@sqrt(@as(f64, 25)) == 5);
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 1.1)), 1.0488088481701516, epsilon));
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 2.0)), 1.4142135623730950, epsilon));
+
+ if (builtin.zig_backend == .stage1) {
+ if (has_f80_rt) {
+ var a: f80 = 25;
+ try expect(@sqrt(a) == 5);
+ }
+ {
+ const a: comptime_float = 25.0;
+ try expect(@sqrt(a) == 5.0);
+ }
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ //{
+ // var a: f128 = 49;
+ //try expect(@sqrt(a) == 7);
+ //}
+
+ // TODO Implement Vector support for stage2
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
+ var result = @sqrt(v);
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 3.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 4.4)), result[3], epsilon));
+ }
}
}
@@ -114,3 +142,311 @@ test "more @sqrt f16 tests" {
try expect(math.isNan(@sqrt(@as(f16, -1.0))));
try expect(math.isNan(@sqrt(@as(f16, math.nan(f16)))));
}
+
+test "@sin" {
+ comptime try testSin();
+ try testSin();
+}
+
+fn testSin() !void {
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
+ var result = @sin(v);
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 3.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 4.4)), result[3], epsilon));
+
+ // stage1 emits an incorrect compile error for `@as(ty, std.math.pi / 2)`
+ // so skip the rest of the tests.
+ return;
+ }
+
+ inline for ([_]type{ f16, f32, f64 }) |ty| {
+ const eps = epsForType(ty);
+ try expect(@sin(@as(ty, 0)) == 0);
+ try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi)), 0, eps));
+ try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi / 2)), 1, eps));
+ try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi / 4)), 0.7071067811865475, eps));
+ }
+}
+
+test "@cos" {
+ comptime try testCos();
+ try testCos();
+}
+
+fn testCos() !void {
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
+ var result = @cos(v);
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 3.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 4.4)), result[3], epsilon));
+
+ // stage1 emits an incorrect compile error for `@as(ty, std.math.pi / 2)`
+ // so skip the rest of the tests.
+ return;
+ }
+
+ inline for ([_]type{ f16, f32, f64 }) |ty| {
+ const eps = epsForType(ty);
+ try expect(@cos(@as(ty, 0)) == 1);
+ try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi)), -1, eps));
+ try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi / 2)), 0, eps));
+ try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi / 4)), 0.7071067811865475, eps));
+ }
+}
+
+test "@exp" {
+ comptime try testExp();
+ try testExp();
+}
+
+fn testExp() !void {
+ inline for ([_]type{ f16, f32, f64 }) |ty| {
+ const eps = epsForType(ty);
+ try expect(@exp(@as(ty, 0)) == 1);
+ try expect(math.approxEqAbs(ty, @exp(@as(ty, 2)), 7.389056098930650, eps));
+ try expect(math.approxEqAbs(ty, @exp(@as(ty, 5)), 148.4131591025766, eps));
+ }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @exp(v);
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@exp2" {
+ comptime try testExp2();
+ try testExp2();
+}
+
+fn testExp2() !void {
+ inline for ([_]type{ f16, f32, f64 }) |ty| {
+ const eps = epsForType(ty);
+ try expect(@exp2(@as(ty, 2)) == 4);
+ try expect(math.approxEqAbs(ty, @exp2(@as(ty, 1.5)), 2.8284271247462, eps));
+ try expect(math.approxEqAbs(ty, @exp2(@as(ty, 4.5)), 22.627416997969, eps));
+ }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @exp2(v);
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@log" {
+ // Old musl (and glibc?), and our current math.ln implementation do not return 1
+ // so also accept those values.
+ comptime try testLog();
+ try testLog();
+}
+
+fn testLog() !void {
+ {
+ var a: f16 = e;
+ try expect(math.approxEqAbs(f16, @log(a), 1, epsilon));
+ }
+ {
+ var a: f32 = e;
+ try expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff)));
+ }
+ {
+ var a: f64 = e;
+ try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000)));
+ }
+ inline for ([_]type{ f16, f32, f64 }) |ty| {
+ const eps = epsForType(ty);
+ try expect(math.approxEqAbs(ty, @log(@as(ty, 2)), 0.6931471805599, eps));
+ try expect(math.approxEqAbs(ty, @log(@as(ty, 5)), 1.6094379124341, eps));
+ }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @log(v);
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@log2" {
+ comptime try testLog2();
+ try testLog2();
+}
+
+fn testLog2() !void {
+ inline for ([_]type{ f16, f32, f64 }) |ty| {
+ const eps = epsForType(ty);
+ try expect(@log2(@as(ty, 4)) == 2);
+ try expect(math.approxEqAbs(ty, @log2(@as(ty, 6)), 2.5849625007212, eps));
+ try expect(math.approxEqAbs(ty, @log2(@as(ty, 10)), 3.3219280948874, eps));
+ }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @log2(v);
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@log10" {
+ comptime try testLog10();
+ try testLog10();
+}
+
+fn testLog10() !void {
+ inline for ([_]type{ f16, f32, f64 }) |ty| {
+ const eps = epsForType(ty);
+ try expect(@log10(@as(ty, 100)) == 2);
+ try expect(math.approxEqAbs(ty, @log10(@as(ty, 15)), 1.176091259056, eps));
+ try expect(math.approxEqAbs(ty, @log10(@as(ty, 50)), 1.698970004336, eps));
+ }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @log10(v);
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@fabs" {
+ comptime try testFabs();
+ try testFabs();
+}
+
+fn testFabs() !void {
+ try expect(@fabs(@as(f16, -2.5)) == 2.5);
+ try expect(@fabs(@as(f16, 2.5)) == 2.5);
+ try expect(@fabs(@as(f32, -2.5)) == 2.5);
+ try expect(@fabs(@as(f32, 2.5)) == 2.5);
+ try expect(@fabs(@as(f64, -2.5)) == 2.5);
+ try expect(@fabs(@as(f64, 2.5)) == 2.5);
+
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ // {
+ // var a: f80 = -2.5;
+ // var b: f80 = 2.5;
+ // try expect(@fabs(a) == 2.5);
+ // try expect(@fabs(b) == 2.5);
+ // }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @fabs(v);
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
+
+test "@floor" {
+ comptime try testFloor();
+ try testFloor();
+}
+
+fn testFloor() !void {
+ try expect(@floor(@as(f16, 2.1)) == 2);
+ try expect(@floor(@as(f32, 2.1)) == 2);
+ try expect(@floor(@as(f64, 3.5)) == 3);
+
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ // {
+ // var a: f80 = 3.5;
+ // try expect(@floor(a) == 3);
+ // }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @floor(v);
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
+
+test "@ceil" {
+ comptime try testCeil();
+ try testCeil();
+}
+
+fn testCeil() !void {
+ try expect(@ceil(@as(f16, 2.1)) == 3);
+ try expect(@ceil(@as(f32, 2.1)) == 3);
+ try expect(@ceil(@as(f64, 3.5)) == 4);
+
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ // {
+ // var a: f80 = 3.5;
+ // try expect(@ceil(a) == 4);
+ // }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @ceil(v);
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
+
+test "@trunc" {
+ comptime try testTrunc();
+ try testTrunc();
+}
+
+fn testTrunc() !void {
+ try expect(@trunc(@as(f16, 2.1)) == 2);
+ try expect(@trunc(@as(f32, 2.1)) == 2);
+ try expect(@trunc(@as(f64, -3.5)) == -3);
+
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ // {
+ // var a: f80 = -3.5;
+ // try expect(@trunc(a) == -3);
+ // }
+
+ // TODO: Implement Vector support for other backends
+ if (builtin.zig_backend == .stage1) {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @trunc(v);
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
diff --git a/test/behavior/floatop_stage1.zig b/test/behavior/floatop_stage1.zig
deleted file mode 100644
index cd11f41b40..0000000000
--- a/test/behavior/floatop_stage1.zig
+++ /dev/null
@@ -1,452 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const math = std.math;
-const pi = std.math.pi;
-const e = std.math.e;
-const Vector = std.meta.Vector;
-const has_f80_rt = @import("builtin").cpu.arch == .x86_64;
-
-const epsilon = 0.000001;
-
-test "@sqrt" {
- comptime try testSqrt();
- try testSqrt();
-}
-
-fn testSqrt() !void {
- if (has_f80_rt) {
- var a: f80 = 25;
- try expect(@sqrt(a) == 5);
- }
- {
- const a: comptime_float = 25.0;
- try expect(@sqrt(a) == 5.0);
- }
- // TODO https://github.com/ziglang/zig/issues/4026
- //{
- // var a: f128 = 49;
- //try expect(@sqrt(a) == 7);
- //}
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
- var result = @sqrt(v);
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 3.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 4.4)), result[3], epsilon));
- }
-}
-
-test "@sin" {
- comptime try testSin();
- try testSin();
-}
-
-fn testSin() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 0;
- try expect(@sin(a) == 0);
- }
- {
- var a: f32 = 0;
- try expect(@sin(a) == 0);
- }
- {
- var a: f64 = 0;
- try expect(@sin(a) == 0);
- }
- // {
- // var a: f80 = 0;
- // try expect(@sin(a) == 0);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
- var result = @sin(v);
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 3.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 4.4)), result[3], epsilon));
- }
-}
-
-test "@cos" {
- comptime try testCos();
- try testCos();
-}
-
-fn testCos() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 0;
- try expect(@cos(a) == 1);
- }
- {
- var a: f32 = 0;
- try expect(@cos(a) == 1);
- }
- {
- var a: f64 = 0;
- try expect(@cos(a) == 1);
- }
- // {
- // var a: f80 = 0;
- // try expect(@cos(a) == 1);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
- var result = @cos(v);
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 3.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 4.4)), result[3], epsilon));
- }
-}
-
-test "@exp" {
- comptime try testExp();
- try testExp();
-}
-
-fn testExp() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 0;
- try expect(@exp(a) == 1);
- }
- {
- var a: f32 = 0;
- try expect(@exp(a) == 1);
- }
- {
- var a: f64 = 0;
- try expect(@exp(a) == 1);
- }
- // {
- // var a: f80 = 0;
- // try expect(@exp(a) == 1);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @exp(v);
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@exp2" {
- comptime try testExp2();
- try testExp2();
-}
-
-fn testExp2() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2;
- try expect(@exp2(a) == 4);
- }
- {
- var a: f32 = 2;
- try expect(@exp2(a) == 4);
- }
- {
- var a: f64 = 2;
- try expect(@exp2(a) == 4);
- }
- // {
- // var a: f80 = 2;
- // try expect(@exp2(a) == 4);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @exp2(v);
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@log" {
- // Old musl (and glibc?), and our current math.ln implementation do not return 1
- // so also accept those values.
- comptime try testLog();
- try testLog();
-}
-
-fn testLog() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = e;
- try expect(math.approxEqAbs(f16, @log(a), 1, epsilon));
- }
- {
- var a: f32 = e;
- try expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff)));
- }
- {
- var a: f64 = e;
- try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000)));
- }
- // {
- // var a: f80 = e;
- // try expect(@log(a) == 1);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @log(v);
- try expect(math.approxEqAbs(f32, @log(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @log(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @log(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @log(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@log2" {
- comptime try testLog2();
- try testLog2();
-}
-
-fn testLog2() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 4;
- try expect(@log2(a) == 2);
- }
- {
- var a: f32 = 4;
- try expect(@log2(a) == 2);
- }
- {
- var a: f64 = 4;
- try expect(@log2(a) == 2);
- }
- // {
- // var a: f80 = 4;
- // try expect(@log2(a) == 2);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @log2(v);
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@log10" {
- comptime try testLog10();
- try testLog10();
-}
-
-fn testLog10() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 100;
- try expect(@log10(a) == 2);
- }
- {
- var a: f32 = 100;
- try expect(@log10(a) == 2);
- }
- {
- var a: f64 = 1000;
- try expect(@log10(a) == 3);
- }
- // {
- // var a: f80 = 1000;
- // try expect(@log10(a) == 3);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @log10(v);
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@fabs" {
- comptime try testFabs();
- try testFabs();
-}
-
-fn testFabs() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = -2.5;
- var b: f16 = 2.5;
- try expect(@fabs(a) == 2.5);
- try expect(@fabs(b) == 2.5);
- }
- {
- var a: f32 = -2.5;
- var b: f32 = 2.5;
- try expect(@fabs(a) == 2.5);
- try expect(@fabs(b) == 2.5);
- }
- {
- var a: f64 = -2.5;
- var b: f64 = 2.5;
- try expect(@fabs(a) == 2.5);
- try expect(@fabs(b) == 2.5);
- }
- // {
- // var a: f80 = -2.5;
- // var b: f80 = 2.5;
- // try expect(@fabs(a) == 2.5);
- // try expect(@fabs(b) == 2.5);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @fabs(v);
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
-test "@floor" {
- comptime try testFloor();
- try testFloor();
-}
-
-fn testFloor() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2.1;
- try expect(@floor(a) == 2);
- }
- {
- var a: f32 = 2.1;
- try expect(@floor(a) == 2);
- }
- {
- var a: f64 = 3.5;
- try expect(@floor(a) == 3);
- }
- // {
- // var a: f80 = 3.5;
- // try expect(@floor(a) == 3);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @floor(v);
- try expect(math.approxEqAbs(f32, @floor(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @floor(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @floor(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @floor(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
-test "@ceil" {
- comptime try testCeil();
- try testCeil();
-}
-
-fn testCeil() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2.1;
- try expect(@ceil(a) == 3);
- }
- {
- var a: f32 = 2.1;
- try expect(@ceil(a) == 3);
- }
- {
- var a: f64 = 3.5;
- try expect(@ceil(a) == 4);
- }
- // {
- // var a: f80 = 3.5;
- // try expect(@ceil(a) == 4);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @ceil(v);
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
-test "@trunc" {
- comptime try testTrunc();
- try testTrunc();
-}
-
-fn testTrunc() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2.1;
- try expect(@trunc(a) == 2);
- }
- {
- var a: f32 = 2.1;
- try expect(@trunc(a) == 2);
- }
- {
- var a: f64 = -3.5;
- try expect(@trunc(a) == -3);
- }
- // {
- // var a: f80 = -3.5;
- // try expect(@trunc(a) == -3);
- // }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @trunc(v);
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
-test "floating point comparisons" {
- if (has_f80_rt) try testFloatComparisons();
- comptime try testFloatComparisons();
-}
-
-fn testFloatComparisons() !void {
- inline for ([_]type{ f16, f32, f64, f80, f128 }) |ty| {
- // No decimal part
- {
- const x: ty = 1.0;
- try expect(x == 1);
- try expect(x != 0);
- try expect(x > 0);
- try expect(x < 2);
- try expect(x >= 1);
- try expect(x <= 1);
- }
- // Non-zero decimal part
- {
- const x: ty = 1.5;
- try expect(x != 1);
- try expect(x != 2);
- try expect(x > 1);
- try expect(x < 2);
- try expect(x >= 1);
- try expect(x <= 2);
- }
- }
-}
From 1e5a494603d287ea3005dc35f0528c0311f43515 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 18:19:03 -0700
Subject: [PATCH 0115/2031] Sema: implement comptime ptr store to optional
payload
and error union payload
---
src/Sema.zig | 70 ++++++++++++-
test/behavior.zig | 4 +-
test/behavior/optional.zig | 163 ++++++++++++++++++++++++++++++
test/behavior/optional_llvm.zig | 27 -----
test/behavior/optional_stage1.zig | 108 --------------------
5 files changed, 232 insertions(+), 140 deletions(-)
delete mode 100644 test/behavior/optional_llvm.zig
delete mode 100644 test/behavior/optional_stage1.zig
diff --git a/src/Sema.zig b/src/Sema.zig
index 4d38c6b7f7..3b4f1c6f55 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -15069,8 +15069,74 @@ fn beginComptimePtrMutation(
else => unreachable,
}
},
- .eu_payload_ptr => return sema.fail(block, src, "TODO comptime store to eu_payload_ptr", .{}),
- .opt_payload_ptr => return sema.fail(block, src, "TODO comptime store opt_payload_ptr", .{}),
+ .eu_payload_ptr => {
+ const eu_ptr_val = ptr_val.castTag(.eu_payload_ptr).?.data;
+ var parent = try beginComptimePtrMutation(sema, block, src, eu_ptr_val);
+ const payload_ty = parent.ty.errorUnionPayload();
+ switch (parent.val.tag()) {
+ else => {
+ // An error union has been initialized to undefined at comptime and now we
+ // are for the first time setting the payload. We must change the
+ representation of the error union from `undef` to `eu_payload`.
+ const arena = parent.beginArena(sema.gpa);
+ defer parent.finishArena();
+
+ const payload = try arena.create(Value.Payload.SubValue);
+ payload.* = .{
+ .base = .{ .tag = .eu_payload },
+ .data = Value.undef,
+ };
+
+ parent.val.* = Value.initPayload(&payload.base);
+
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &payload.data,
+ .ty = payload_ty,
+ };
+ },
+ .eu_payload => return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &parent.val.castTag(.eu_payload).?.data,
+ .ty = payload_ty,
+ },
+ }
+ },
+ .opt_payload_ptr => {
+ const opt_ptr_val = ptr_val.castTag(.opt_payload_ptr).?.data;
+ var parent = try beginComptimePtrMutation(sema, block, src, opt_ptr_val);
+ const payload_ty = try parent.ty.optionalChildAlloc(sema.arena);
+ switch (parent.val.tag()) {
+ .undef, .null_value => {
+ // An optional has been initialized to undefined at comptime and now we
+ // are for the first time setting the payload. We must change the
+ // representation of the optional from `undef` to `opt_payload`.
+ const arena = parent.beginArena(sema.gpa);
+ defer parent.finishArena();
+
+ const payload = try arena.create(Value.Payload.SubValue);
+ payload.* = .{
+ .base = .{ .tag = .opt_payload },
+ .data = Value.undef,
+ };
+
+ parent.val.* = Value.initPayload(&payload.base);
+
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &payload.data,
+ .ty = payload_ty,
+ };
+ },
+ .opt_payload => return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &parent.val.castTag(.opt_payload).?.data,
+ .ty = payload_ty,
+ },
+
+ else => unreachable,
+ }
+ },
.decl_ref => unreachable, // isComptimeMutablePtr() has been checked already
else => unreachable,
}
diff --git a/test/behavior.zig b/test/behavior.zig
index b2ffbabde2..525ae5b2a1 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -33,7 +33,7 @@ test {
_ = @import("behavior/hasdecl.zig");
_ = @import("behavior/hasfield.zig");
_ = @import("behavior/namespace_depends_on_compile_var.zig");
- _ = @import("behavior/optional_llvm.zig");
+ _ = @import("behavior/optional.zig");
_ = @import("behavior/prefetch.zig");
_ = @import("behavior/pub_enum.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
@@ -69,7 +69,6 @@ test {
_ = @import("behavior/inttoptr.zig");
_ = @import("behavior/member_func.zig");
_ = @import("behavior/null.zig");
- _ = @import("behavior/optional.zig");
_ = @import("behavior/pointers.zig");
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
@@ -154,7 +153,6 @@ test {
_ = @import("behavior/ir_block_deps.zig");
_ = @import("behavior/misc.zig");
_ = @import("behavior/muladd.zig");
- _ = @import("behavior/optional_stage1.zig");
_ = @import("behavior/popcount_stage1.zig");
_ = @import("behavior/reflection.zig");
_ = @import("behavior/select.zig");
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index d6d6249c6e..3caf777195 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -1,9 +1,13 @@
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "passing an optional integer as a parameter" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
const S = struct {
fn entry() bool {
var x: i32 = 1234;
@@ -21,12 +25,18 @@ test "passing an optional integer as a parameter" {
pub const EmptyStruct = struct {};
test "optional pointer to size zero struct" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
var e = EmptyStruct{};
var o: ?*EmptyStruct = &e;
try expect(o != null);
}
test "equality compare optional pointers" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
try testNullPtrsEql();
comptime try testNullPtrsEql();
}
@@ -48,6 +58,9 @@ fn testNullPtrsEql() !void {
}
test "optional with void type" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
const Foo = struct {
x: ?void,
};
@@ -56,6 +69,9 @@ test "optional with void type" {
}
test "address of unwrap optional" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
const S = struct {
const Foo = struct {
a: i32,
@@ -73,6 +89,9 @@ test "address of unwrap optional" {
}
test "nested optional field in struct" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
const S2 = struct {
y: u8,
};
@@ -86,6 +105,9 @@ test "nested optional field in struct" {
}
test "equality compare optional with non-optional" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
try test_cmp_optional_non_optional();
comptime try test_cmp_optional_non_optional();
}
@@ -120,6 +142,9 @@ fn test_cmp_optional_non_optional() !void {
}
test "unwrap function call with optional pointer return value" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
const S = struct {
fn entry() !void {
try expect(foo().?.* == 1234);
@@ -138,6 +163,9 @@ test "unwrap function call with optional pointer return value" {
}
test "nested orelse" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
const S = struct {
fn entry() !void {
try expect(func() == null);
@@ -159,3 +187,138 @@ test "nested orelse" {
try S.entry();
comptime try S.entry();
}
+
+test "self-referential struct through a slice of optional" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Node = struct {
+ children: []?Node,
+ data: ?u8,
+
+ fn new() Node {
+ return Node{
+ .children = undefined,
+ .data = null,
+ };
+ }
+ };
+ };
+
+ var n = S.Node.new();
+ try expect(n.data == null);
+}
+
+test "assigning to an unwrapped optional field in an inline loop" {
+ comptime var maybe_pos_arg: ?comptime_int = null;
+ inline for ("ab") |x| {
+ _ = x;
+ maybe_pos_arg = 0;
+ if (maybe_pos_arg.? != 0) {
+ @compileError("bad");
+ }
+ maybe_pos_arg.? = 10;
+ }
+}
+
+test "coerce an anon struct literal to optional struct" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Struct = struct {
+ field: u32,
+ };
+ fn doTheTest() !void {
+ var maybe_dims: ?Struct = null;
+ maybe_dims = .{ .field = 1 };
+ try expect(maybe_dims.?.field == 1);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "0-bit child type coerced to optional return ptr result location" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var y = Foo{};
+ var z = y.thing();
+ try expect(z != null);
+ }
+
+ const Foo = struct {
+ pub const Bar = struct {
+ field: *Foo,
+ };
+
+ pub fn thing(self: *Foo) ?Bar {
+ return Bar{ .field = self };
+ }
+ };
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "0-bit child type coerced to optional" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var it: Foo = .{
+ .list = undefined,
+ };
+ try expect(it.foo() != null);
+ }
+
+ const Empty = struct {};
+ const Foo = struct {
+ list: [10]Empty,
+
+ fn foo(self: *Foo) ?*Empty {
+ const data = &self.list[0];
+ return data;
+ }
+ };
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "array of optional unaligned types" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Enum = enum { one, two, three };
+
+ const SomeUnion = union(enum) {
+ Num: Enum,
+ Other: u32,
+ };
+
+ const values = [_]?SomeUnion{
+ SomeUnion{ .Num = .one },
+ SomeUnion{ .Num = .two },
+ SomeUnion{ .Num = .three },
+ SomeUnion{ .Num = .one },
+ SomeUnion{ .Num = .two },
+ SomeUnion{ .Num = .three },
+ };
+
+ // The index must be a runtime value
+ var i: usize = 0;
+ try expectEqual(Enum.one, values[i].?.Num);
+ i += 1;
+ try expectEqual(Enum.two, values[i].?.Num);
+ i += 1;
+ try expectEqual(Enum.three, values[i].?.Num);
+ i += 1;
+ try expectEqual(Enum.one, values[i].?.Num);
+ i += 1;
+ try expectEqual(Enum.two, values[i].?.Num);
+ i += 1;
+ try expectEqual(Enum.three, values[i].?.Num);
+}
diff --git a/test/behavior/optional_llvm.zig b/test/behavior/optional_llvm.zig
deleted file mode 100644
index 9fdf703a42..0000000000
--- a/test/behavior/optional_llvm.zig
+++ /dev/null
@@ -1,27 +0,0 @@
-const std = @import("std");
-const testing = std.testing;
-const expect = testing.expect;
-const expectEqual = testing.expectEqual;
-const builtin = @import("builtin");
-
-test "self-referential struct through a slice of optional" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- const S = struct {
- const Node = struct {
- children: []?Node,
- data: ?u8,
-
- fn new() Node {
- return Node{
- .children = undefined,
- .data = null,
- };
- }
- };
- };
-
- var n = S.Node.new();
- try expect(n.data == null);
-}
diff --git a/test/behavior/optional_stage1.zig b/test/behavior/optional_stage1.zig
deleted file mode 100644
index 04c51f8d9d..0000000000
--- a/test/behavior/optional_stage1.zig
+++ /dev/null
@@ -1,108 +0,0 @@
-const std = @import("std");
-const testing = std.testing;
-const expect = testing.expect;
-const expectEqual = testing.expectEqual;
-
-test "assigning to an unwrapped optional field in an inline loop" {
- comptime var maybe_pos_arg: ?comptime_int = null;
- inline for ("ab") |x| {
- _ = x;
- maybe_pos_arg = 0;
- if (maybe_pos_arg.? != 0) {
- @compileError("bad");
- }
- maybe_pos_arg.? = 10;
- }
-}
-
-test "coerce an anon struct literal to optional struct" {
- const S = struct {
- const Struct = struct {
- field: u32,
- };
- fn doTheTest() !void {
- var maybe_dims: ?Struct = null;
- maybe_dims = .{ .field = 1 };
- try expect(maybe_dims.?.field == 1);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "0-bit child type coerced to optional return ptr result location" {
- const S = struct {
- fn doTheTest() !void {
- var y = Foo{};
- var z = y.thing();
- try expect(z != null);
- }
-
- const Foo = struct {
- pub const Bar = struct {
- field: *Foo,
- };
-
- pub fn thing(self: *Foo) ?Bar {
- return Bar{ .field = self };
- }
- };
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "0-bit child type coerced to optional" {
- const S = struct {
- fn doTheTest() !void {
- var it: Foo = .{
- .list = undefined,
- };
- try expect(it.foo() != null);
- }
-
- const Empty = struct {};
- const Foo = struct {
- list: [10]Empty,
-
- fn foo(self: *Foo) ?*Empty {
- const data = &self.list[0];
- return data;
- }
- };
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "array of optional unaligned types" {
- const Enum = enum { one, two, three };
-
- const SomeUnion = union(enum) {
- Num: Enum,
- Other: u32,
- };
-
- const values = [_]?SomeUnion{
- SomeUnion{ .Num = .one },
- SomeUnion{ .Num = .two },
- SomeUnion{ .Num = .three },
- SomeUnion{ .Num = .one },
- SomeUnion{ .Num = .two },
- SomeUnion{ .Num = .three },
- };
-
- // The index must be a runtime value
- var i: usize = 0;
- try expectEqual(Enum.one, values[i].?.Num);
- i += 1;
- try expectEqual(Enum.two, values[i].?.Num);
- i += 1;
- try expectEqual(Enum.three, values[i].?.Num);
- i += 1;
- try expectEqual(Enum.one, values[i].?.Num);
- i += 1;
- try expectEqual(Enum.two, values[i].?.Num);
- i += 1;
- try expectEqual(Enum.three, values[i].?.Num);
-}
From f5471299d81e809c706e147d2ea79c83aeb8b650 Mon Sep 17 00:00:00 2001
From: Sebsatian Keller
Date: Thu, 10 Feb 2022 02:35:53 +0100
Subject: [PATCH 0116/2031] stage 1: improve error message if error union is
cast to payload (#10770)
Also: Added special error message for `?T` to `T` casting
---
src/stage1/ir.cpp | 28 ++++++++++++++++++++++++++++
test/compile_errors.zig | 14 +++++++-------
2 files changed, 35 insertions(+), 7 deletions(-)
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index be6226313f..0b6332f480 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -8242,6 +8242,34 @@ static Stage1AirInst *ir_analyze_cast(IrAnalyze *ira, Scope *scope, AstNode *sou
return ir_implicit_cast2(ira, scope, source_node, cast1, wanted_type);
}
+ // E!T to T
+ if (actual_type->id == ZigTypeIdErrorUnion) {
+ if (types_match_const_cast_only(ira, actual_type->data.error_union.payload_type, wanted_type,
+ source_node, false).id == ConstCastResultIdOk)
+ {
+ ErrorMsg *parent_msg = ir_add_error_node(ira, source_node,
+ buf_sprintf("cannot convert error union to payload type. consider using `try`, `catch`, or `if`. expected type '%s', found '%s'",
+ buf_ptr(&wanted_type->name),
+ buf_ptr(&actual_type->name)));
+ report_recursive_error(ira, source_node, &const_cast_result, parent_msg);
+ return ira->codegen->invalid_inst_gen;
+ }
+ }
+
+ //?T to T
+ if (actual_type->id == ZigTypeIdOptional) {
+ if (types_match_const_cast_only(ira, actual_type->data.maybe.child_type, wanted_type,
+ source_node, false).id == ConstCastResultIdOk)
+ {
+ ErrorMsg *parent_msg = ir_add_error_node(ira, source_node,
+ buf_sprintf("cannot convert optional to payload type. consider using `.?`, `orelse`, or `if`. expected type '%s', found '%s'",
+ buf_ptr(&wanted_type->name),
+ buf_ptr(&actual_type->name)));
+ report_recursive_error(ira, source_node, &const_cast_result, parent_msg);
+ return ira->codegen->invalid_inst_gen;
+ }
+ }
+
ErrorMsg *parent_msg = ir_add_error_node(ira, source_node,
buf_sprintf("expected type '%s', found '%s'",
buf_ptr(&wanted_type->name),
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 252ec1496b..3c224013c9 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -790,9 +790,9 @@ pub fn addCases(ctx: *TestContext) !void {
"tmp.zig:1:17: note: function cannot return an error",
"tmp.zig:8:5: error: expected type 'void', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set'",
"tmp.zig:7:17: note: function cannot return an error",
- "tmp.zig:11:15: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
+ "tmp.zig:11:15: error: cannot convert error union to payload type. consider using `try`, `catch`, or `if`. expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
"tmp.zig:10:17: note: function cannot return an error",
- "tmp.zig:15:14: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
+ "tmp.zig:15:14: error: cannot convert error union to payload type. consider using `try`, `catch`, or `if`. expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
"tmp.zig:14:5: note: cannot store an error in type 'u32'",
});
@@ -1879,7 +1879,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = afoo;
\\}
, &[_][]const u8{
- "tmp.zig:12:25: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(get_uval)).Fn.return_type.?).ErrorUnion.error_set!u32'",
+ "tmp.zig:12:25: error: cannot convert error union to payload type. consider using `try`, `catch`, or `if`. expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(get_uval)).Fn.return_type.?).ErrorUnion.error_set!u32'",
});
ctx.objErrStage1("assigning to struct or union fields that are not optionals with a function that returns an optional",
@@ -1899,7 +1899,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = s;
\\}
, &[_][]const u8{
- "tmp.zig:11:27: error: expected type 'u8', found '?u8'",
+ "tmp.zig:11:27: error: cannot convert optional to payload type. consider using `.?`, `orelse`, or `if`. expected type 'u8', found '?u8'",
});
ctx.objErrStage1("missing result type for phi node",
@@ -2308,7 +2308,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ not_optional: i32,
\\};
, &[_][]const u8{
- "tmp.zig:3:36: error: expected type 'i32', found '?i32'",
+ "tmp.zig:3:36: error: cannot convert optional to payload type. consider using `.?`, `orelse`, or `if`. expected type 'i32', found '?i32'",
});
ctx.objErrStage1("result location incompatibility mismatching handle_is_ptr",
@@ -2325,7 +2325,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ not_optional: i32,
\\};
, &[_][]const u8{
- "tmp.zig:3:36: error: expected type 'i32', found '?i32'",
+ "tmp.zig:3:36: error: cannot convert optional to payload type. consider using `.?`, `orelse`, or `if`. expected type 'i32', found '?i32'",
});
ctx.objErrStage1("const frame cast to anyframe",
@@ -8828,7 +8828,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ v = u;
\\}
, &[_][]const u8{
- "tmp.zig:4:9: error: expected type '*anyopaque', found '?*anyopaque'",
+ "tmp.zig:4:9: error: cannot convert optional to payload type. consider using `.?`, `orelse`, or `if`. expected type '*anyopaque', found '?*anyopaque'",
});
ctx.objErrStage1("Issue #6823: don't allow .* to be followed by **",
From f2f1c63daff8e47b6b901d53fd58805e54fcdf78 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 18:52:32 -0700
Subject: [PATCH 0117/2031] stage2: add log and logf to freestanding libc
---
lib/std/special/c.zig | 18 +++++++++++++++---
lib/std/special/c_stage1.zig | 8 --------
2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig
index a4aa4f66b2..8db26eb5a5 100644
--- a/lib/std/special/c.zig
+++ b/lib/std/special/c.zig
@@ -17,9 +17,13 @@ comptime {
if (builtin.zig_backend != .stage1) {
@export(memset, .{ .name = "memset", .linkage = .Strong });
@export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
+
@export(trunc, .{ .name = "trunc", .linkage = .Strong });
@export(truncf, .{ .name = "truncf", .linkage = .Strong });
@export(truncl, .{ .name = "truncl", .linkage = .Strong });
+
+ @export(log, .{ .name = "log", .linkage = .Strong });
+ @export(logf, .{ .name = "logf", .linkage = .Strong });
} else {
_ = @import("c_stage1.zig");
}
@@ -80,17 +84,25 @@ fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(
return dest;
}
-fn trunc(a: f64) f64 {
+fn trunc(a: f64) callconv(.C) f64 {
return math.trunc(a);
}
-fn truncf(a: f32) f32 {
+fn truncf(a: f32) callconv(.C) f32 {
return math.trunc(a);
}
-fn truncl(a: c_longdouble) c_longdouble {
+fn truncl(a: c_longdouble) callconv(.C) c_longdouble {
if (!long_double_is_f128) {
@panic("TODO implement this");
}
return math.trunc(a);
}
+
+fn log(a: f64) callconv(.C) f64 {
+ return math.ln(a);
+}
+
+fn logf(a: f32) callconv(.C) f32 {
+ return math.ln(a);
+}
diff --git a/lib/std/special/c_stage1.zig b/lib/std/special/c_stage1.zig
index 3ae93c2bdb..14bc2ac2de 100644
--- a/lib/std/special/c_stage1.zig
+++ b/lib/std/special/c_stage1.zig
@@ -708,14 +708,6 @@ export fn exp2f(a: f32) f32 {
return math.exp2(a);
}
-export fn log(a: f64) f64 {
- return math.ln(a);
-}
-
-export fn logf(a: f32) f32 {
- return math.ln(a);
-}
-
export fn log2(a: f64) f64 {
return math.log2(a);
}
From 65c812842dfb0db9a4c6f5af04719ebf18f2d169 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 9 Feb 2022 19:13:53 -0700
Subject: [PATCH 0118/2031] freestanding libc: fix missing functions
In the previous commit I got mixed up and cut-pasted instead of
copy-pasting. In this commit I made c_stage1.zig additionally included
for stage1 and everything else included for both. So moving forward we
move stuff over from c_stage1.zig to c.zig instead of copying.
---
lib/std/special/c.zig | 31 ++++++++++++++++-----------
lib/std/special/c_stage1.zig | 41 ------------------------------------
2 files changed, 19 insertions(+), 53 deletions(-)
diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig
index 8db26eb5a5..d101c9f7e9 100644
--- a/lib/std/special/c.zig
+++ b/lib/std/special/c.zig
@@ -14,19 +14,20 @@ comptime {
// When the self-hosted compiler is further along, all the logic from c_stage1.zig will
// be migrated to this file and then c_stage1.zig will be deleted. Until then we have a
// simpler implementation of c.zig that only uses features already implemented in self-hosted.
- if (builtin.zig_backend != .stage1) {
- @export(memset, .{ .name = "memset", .linkage = .Strong });
- @export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
-
- @export(trunc, .{ .name = "trunc", .linkage = .Strong });
- @export(truncf, .{ .name = "truncf", .linkage = .Strong });
- @export(truncl, .{ .name = "truncl", .linkage = .Strong });
-
- @export(log, .{ .name = "log", .linkage = .Strong });
- @export(logf, .{ .name = "logf", .linkage = .Strong });
- } else {
+ if (builtin.zig_backend == .stage1) {
_ = @import("c_stage1.zig");
}
+
+ @export(memset, .{ .name = "memset", .linkage = .Strong });
+ @export(__memset, .{ .name = "__memset", .linkage = .Strong });
+ @export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
+
+ @export(trunc, .{ .name = "trunc", .linkage = .Strong });
+ @export(truncf, .{ .name = "truncf", .linkage = .Strong });
+ @export(truncl, .{ .name = "truncl", .linkage = .Strong });
+
+ @export(log, .{ .name = "log", .linkage = .Strong });
+ @export(logf, .{ .name = "logf", .linkage = .Strong });
}
// Avoid dragging in the runtime safety mechanisms into this .o file,
@@ -65,6 +66,12 @@ fn memset(dest: ?[*]u8, c: u8, len: usize) callconv(.C) ?[*]u8 {
return dest;
}
+fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
+ if (dest_n < n)
+ @panic("buffer overflow");
+ return memset(dest, c, n);
+}
+
fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.C) ?[*]u8 {
@setRuntimeSafety(false);
@@ -73,7 +80,7 @@ fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(
var s = src.?;
var n = len;
while (true) {
- d.* = s.*;
+ d[0] = s[0];
n -= 1;
if (n == 0) break;
d += 1;
diff --git a/lib/std/special/c_stage1.zig b/lib/std/special/c_stage1.zig
index 14bc2ac2de..9e1c5a288c 100644
--- a/lib/std/special/c_stage1.zig
+++ b/lib/std/special/c_stage1.zig
@@ -161,32 +161,6 @@ test "strncmp" {
try std.testing.expect(strncmp("\xff", "\x02", 1) == 253);
}
-export fn memset(dest: ?[*]u8, c: u8, n: usize) callconv(.C) ?[*]u8 {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1)
- dest.?[index] = c;
-
- return dest;
-}
-
-export fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
- if (dest_n < n)
- @panic("buffer overflow");
- return memset(dest, c, n);
-}
-
-export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1)
- dest.?[index] = src.?[index];
-
- return dest;
-}
-
export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
@setRuntimeSafety(false);
@@ -732,21 +706,6 @@ export fn fabsf(a: f32) f32 {
return math.fabs(a);
}
-export fn trunc(a: f64) f64 {
- return math.trunc(a);
-}
-
-export fn truncf(a: f32) f32 {
- return math.trunc(a);
-}
-
-export fn truncl(a: c_longdouble) c_longdouble {
- if (!long_double_is_f128) {
- @panic("TODO implement this");
- }
- return math.trunc(a);
-}
-
export fn round(a: f64) f64 {
return math.round(a);
}
From 57357c43e3b56fd636cd08af591c50a08223b654 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 9 Feb 2022 23:45:50 +0100
Subject: [PATCH 0119/2031] elf: pad out file to the required size when init
data
We need to pad out the file to the required maximum size equal the
final section's offset plus the section's size. We only need to
this when populating initial metadata and only when section header
was updated.
---
src/link/Elf.zig | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 9ab84de1ce..23bd5bb2dd 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -922,6 +922,21 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
// We are starting with an empty file. The default values are correct, null and empty list.
}
+
+ if (self.shdr_table_dirty) {
+ // We need to find out what the max file offset is according to section headers.
+ // Otherwise, we may end up with an ELF binary with file size not matching the final section's
+ // offset + it's filesize.
+ var max_file_offset: u64 = 0;
+
+ for (self.sections.items) |shdr| {
+ if (shdr.sh_offset + shdr.sh_size > max_file_offset) {
+ max_file_offset = shdr.sh_offset + shdr.sh_size;
+ }
+ }
+
+ try self.base.file.?.pwriteAll(&[_]u8{0}, max_file_offset);
+ }
}
pub const abbrev_compile_unit = 1;
From c10fdde5a64a46bc514500e97b8c87d19f86e431 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 00:24:52 -0700
Subject: [PATCH 0120/2031] stage2: LLVM backend: make unnamed struct globals
LLVM union globals have to be lowered as unnamed structs if the
non-most-aligned field is the active tag. In this case it bubbles up so
that structs containing unions have the same restriction.
This fix needs to be applied to optionals and other callsites of
createNamedStruct.
The bug fixed in this commit was revealed in searching for
the cause of #10837.
---
src/codegen/llvm.zig | 45 +++-
test/behavior.zig | 1 -
test/behavior/union.zig | 480 +++++++++++++++++++++++++++++++++
test/behavior/union_stage1.zig | 421 -----------------------------
4 files changed, 514 insertions(+), 433 deletions(-)
delete mode 100644 test/behavior/union_stage1.zig
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 9c9bf0d0f2..d85a16d16f 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -809,7 +809,16 @@ pub const DeclGen = struct {
};
}
- fn llvmType(dg: *DeclGen, t: Type) Error!*const llvm.Type {
+ fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool {
+ // Once `llvmType` succeeds, successive calls to it with the same Zig type
+ // are guaranteed to succeed. So if a call to `llvmType` fails here it means
+ // it is the first time lowering the type, which means the value can't possible
+ // have that type.
+ const llvm_ty = dg.llvmType(ty) catch return true;
+ return val.typeOf() != llvm_ty;
+ }
+
+ fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type {
const gpa = dg.gpa;
switch (t.zigTypeTag()) {
.Void, .NoReturn => return dg.context.voidType(),
@@ -1168,9 +1177,8 @@ pub const DeclGen = struct {
.BoundFn => @panic("TODO remove BoundFn from the language"),
- .Frame,
- .AnyFrame,
- => return dg.todo("implement llvmType for type '{}'", .{t}),
+ .Frame => @panic("TODO implement llvmType for Frame types"),
+ .AnyFrame => @panic("TODO implement llvmType for AnyFrame types"),
}
}
@@ -1299,7 +1307,8 @@ pub const DeclGen = struct {
llvm_u32.constInt(0, .False),
llvm_u32.constInt(field_ptr.field_index, .False),
};
- return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ const uncasted = parent_ptr.constInBoundsGEP(&indices, indices.len);
+ return uncasted.constBitCast(try dg.llvmType(tv.ty));
},
.elem_ptr => {
const elem_ptr = tv.val.castTag(.elem_ptr).?.data;
@@ -1463,6 +1472,7 @@ pub const DeclGen = struct {
var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count);
defer llvm_fields.deinit(gpa);
+ var make_unnamed_struct = false;
const struct_obj = tv.ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
const target = dg.module.getTarget();
@@ -1558,17 +1568,30 @@ pub const DeclGen = struct {
const field_ty = tv.ty.structFieldType(i);
if (!field_ty.hasRuntimeBits()) continue;
- llvm_fields.appendAssumeCapacity(try dg.genTypedValue(.{
+ const field_llvm_val = try dg.genTypedValue(.{
.ty = field_ty,
.val = field_val,
- }));
+ });
+
+ make_unnamed_struct = make_unnamed_struct or
+ dg.isUnnamedType(field_ty, field_llvm_val);
+
+ llvm_fields.appendAssumeCapacity(field_llvm_val);
}
}
- return llvm_struct_ty.constNamedStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- );
+ if (make_unnamed_struct) {
+ return dg.context.constStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ .False,
+ );
+ } else {
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ );
+ }
},
.Union => {
const llvm_union_ty = try dg.llvmType(tv.ty);
diff --git a/test/behavior.zig b/test/behavior.zig
index 525ae5b2a1..3e4ae36bfc 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -166,7 +166,6 @@ test {
_ = @import("behavior/tuple.zig");
_ = @import("behavior/type_stage1.zig");
_ = @import("behavior/typename.zig");
- _ = @import("behavior/union_stage1.zig");
_ = @import("behavior/union_with_members.zig");
_ = @import("behavior/var_args.zig");
_ = @import("behavior/vector.zig");
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 1b6d0ff9cb..cdd63df44e 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -490,3 +490,483 @@ test "tagged union with all void fields but a meaningful tag" {
// TODO enable the test at comptime too
//comptime try S.doTheTest();
}
+
+test "union(enum(u32)) with specified and unspecified tag values" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ comptime try expect(Tag(Tag(MultipleChoice2)) == u32);
+ try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
+ comptime try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
+}
+
+const MultipleChoice2 = union(enum(u32)) {
+ Unspecified1: i32,
+ A: f32 = 20,
+ Unspecified2: void,
+ B: bool = 40,
+ Unspecified3: i32,
+ C: i8 = 60,
+ Unspecified4: void,
+ D: void = 1000,
+ Unspecified5: i32,
+};
+
+fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void {
+ try expect(@enumToInt(@as(Tag(MultipleChoice2), x)) == 60);
+ try expect(1123 == switch (x) {
+ MultipleChoice2.A => 1,
+ MultipleChoice2.B => 2,
+ MultipleChoice2.C => |v| @as(i32, 1000) + v,
+ MultipleChoice2.D => 4,
+ MultipleChoice2.Unspecified1 => 5,
+ MultipleChoice2.Unspecified2 => 6,
+ MultipleChoice2.Unspecified3 => 7,
+ MultipleChoice2.Unspecified4 => 8,
+ MultipleChoice2.Unspecified5 => 9,
+ });
+}
+
+test "switch on union with only 1 field" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ var r: PartialInst = undefined;
+ r = PartialInst.Compiled;
+ switch (r) {
+ PartialInst.Compiled => {
+ var z: PartialInstWithPayload = undefined;
+ z = PartialInstWithPayload{ .Compiled = 1234 };
+ switch (z) {
+ PartialInstWithPayload.Compiled => |x| {
+ try expect(x == 1234);
+ return;
+ },
+ }
+ },
+ }
+ unreachable;
+}
+
+const PartialInst = union(enum) {
+ Compiled,
+};
+
+const PartialInstWithPayload = union(enum) {
+ Compiled: i32,
+};
+
+test "union with only 1 field casted to its enum type which has enum value specified" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Literal = union(enum) {
+ Number: f64,
+ Bool: bool,
+ };
+
+ const ExprTag = enum(comptime_int) {
+ Literal = 33,
+ };
+
+ const Expr = union(ExprTag) {
+ Literal: Literal,
+ };
+
+ var e = Expr{ .Literal = Literal{ .Bool = true } };
+ comptime try expect(Tag(ExprTag) == comptime_int);
+ var t = @as(ExprTag, e);
+ try expect(t == Expr.Literal);
+ try expect(@enumToInt(t) == 33);
+ comptime try expect(@enumToInt(t) == 33);
+}
+
+test "@enumToInt works on unions" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Bar = union(enum) {
+ A: bool,
+ B: u8,
+ C,
+ };
+
+ const a = Bar{ .A = true };
+ var b = Bar{ .B = undefined };
+ var c = Bar.C;
+ try expect(@enumToInt(a) == 0);
+ try expect(@enumToInt(b) == 1);
+ try expect(@enumToInt(c) == 2);
+}
+
+test "comptime union field value equality" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const a0 = Setter(Attribute{ .A = false });
+ const a1 = Setter(Attribute{ .A = true });
+ const a2 = Setter(Attribute{ .A = false });
+
+ const b0 = Setter(Attribute{ .B = 5 });
+ const b1 = Setter(Attribute{ .B = 9 });
+ const b2 = Setter(Attribute{ .B = 5 });
+
+ try expect(a0 == a0);
+ try expect(a1 == a1);
+ try expect(a0 == a2);
+
+ try expect(b0 == b0);
+ try expect(b1 == b1);
+ try expect(b0 == b2);
+
+ try expect(a0 != b0);
+ try expect(a0 != a1);
+ try expect(b0 != b1);
+}
+
+const Attribute = union(enum) {
+ A: bool,
+ B: u8,
+};
+
+fn setAttribute(attr: Attribute) void {
+ _ = attr;
+}
+
+fn Setter(attr: Attribute) type {
+ return struct {
+ fn set() void {
+ setAttribute(attr);
+ }
+ };
+}
+
+test "return union init with void payload" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn entry() !void {
+ try expect(func().state == State.one);
+ }
+ const Outer = union(enum) {
+ state: State,
+ };
+ const State = union(enum) {
+ one: void,
+ two: u32,
+ };
+ fn func() Outer {
+ return Outer{ .state = State{ .one = {} } };
+ }
+ };
+ try S.entry();
+ comptime try S.entry();
+}
+
+test "@unionInit can modify a union type" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const UnionInitEnum = union(enum) {
+ Boolean: bool,
+ Byte: u8,
+ };
+
+ var value: UnionInitEnum = undefined;
+
+ value = @unionInit(UnionInitEnum, "Boolean", true);
+ try expect(value.Boolean == true);
+ value.Boolean = false;
+ try expect(value.Boolean == false);
+
+ value = @unionInit(UnionInitEnum, "Byte", 2);
+ try expect(value.Byte == 2);
+ value.Byte = 3;
+ try expect(value.Byte == 3);
+}
+
+test "@unionInit can modify a pointer value" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const UnionInitEnum = union(enum) {
+ Boolean: bool,
+ Byte: u8,
+ };
+
+ var value: UnionInitEnum = undefined;
+ var value_ptr = &value;
+
+ value_ptr.* = @unionInit(UnionInitEnum, "Boolean", true);
+ try expect(value.Boolean == true);
+
+ value_ptr.* = @unionInit(UnionInitEnum, "Byte", 2);
+ try expect(value.Byte == 2);
+}
+
+test "union no tag with struct member" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Struct = struct {};
+ const Union = union {
+ s: Struct,
+ pub fn foo(self: *@This()) void {
+ _ = self;
+ }
+ };
+ var u = Union{ .s = Struct{} };
+ u.foo();
+}
+
+test "union with comptime_int tag" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Union = union(enum(comptime_int)) {
+ X: u32,
+ Y: u16,
+ Z: u8,
+ };
+ comptime try expect(Tag(Tag(Union)) == comptime_int);
+}
+
+test "extern union doesn't trigger field check at comptime" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const U = extern union {
+ x: u32,
+ y: u8,
+ };
+
+ const x = U{ .x = 0x55AAAA55 };
+ comptime try expect(x.y == 0x55);
+}
+
+test "anonymous union literal syntax" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Number = union {
+ int: i32,
+ float: f64,
+ };
+
+ fn doTheTest() !void {
+ var i: Number = .{ .int = 42 };
+ var f = makeNumber();
+ try expect(i.int == 42);
+ try expect(f.float == 12.34);
+ }
+
+ fn makeNumber() Number {
+ return .{ .float = 12.34 };
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "function call result coerces from tagged union to the tag" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Arch = union(enum) {
+ One,
+ Two: usize,
+ };
+
+ const ArchTag = Tag(Arch);
+
+ fn doTheTest() !void {
+ var x: ArchTag = getArch1();
+ try expect(x == .One);
+
+ var y: ArchTag = getArch2();
+ try expect(y == .Two);
+ }
+
+ pub fn getArch1() Arch {
+ return .One;
+ }
+
+ pub fn getArch2() Arch {
+ return .{ .Two = 99 };
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "cast from anonymous struct to union" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const U = union(enum) {
+ A: u32,
+ B: []const u8,
+ C: void,
+ };
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = .{ .A = 123 };
+ const t1 = .{ .B = "foo" };
+ const t2 = .{ .C = {} };
+ const t3 = .{ .A = y };
+ const x0: U = t0;
+ var x1: U = t1;
+ const x2: U = t2;
+ var x3: U = t3;
+ try expect(x0.A == 123);
+ try expect(std.mem.eql(u8, x1.B, "foo"));
+ try expect(x2 == .C);
+ try expect(x3.A == y);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "cast from pointer to anonymous struct to pointer to union" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const U = union(enum) {
+ A: u32,
+ B: []const u8,
+ C: void,
+ };
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = &.{ .A = 123 };
+ const t1 = &.{ .B = "foo" };
+ const t2 = &.{ .C = {} };
+ const t3 = &.{ .A = y };
+ const x0: *const U = t0;
+ var x1: *const U = t1;
+ const x2: *const U = t2;
+ var x3: *const U = t3;
+ try expect(x0.A == 123);
+ try expect(std.mem.eql(u8, x1.B, "foo"));
+ try expect(x2.* == .C);
+ try expect(x3.A == y);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "switching on non exhaustive union" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const E = enum(u8) {
+ a,
+ b,
+ _,
+ };
+ const U = union(E) {
+ a: i32,
+ b: u32,
+ };
+ fn doTheTest() !void {
+ var a = U{ .a = 2 };
+ switch (a) {
+ .a => |val| try expect(val == 2),
+ .b => unreachable,
+ }
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "containers with single-field enums" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const A = union(enum) { f1 };
+ const B = union(enum) { f1: void };
+ const C = struct { a: A };
+ const D = struct { a: B };
+
+ fn doTheTest() !void {
+ var array1 = [1]A{A{ .f1 = {} }};
+ var array2 = [1]B{B{ .f1 = {} }};
+ try expect(array1[0] == .f1);
+ try expect(array2[0] == .f1);
+
+ var struct1 = C{ .a = A{ .f1 = {} } };
+ var struct2 = D{ .a = B{ .f1 = {} } };
+ try expect(struct1.a == .f1);
+ try expect(struct2.a == .f1);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "@unionInit on union w/ tag but no fields" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Type = enum(u8) { no_op = 105 };
+
+ const Data = union(Type) {
+ no_op: void,
+
+ pub fn decode(buf: []const u8) Data {
+ _ = buf;
+ return @unionInit(Data, "no_op", {});
+ }
+ };
+
+ comptime {
+ std.debug.assert(@sizeOf(Data) != 0);
+ }
+
+ fn doTheTest() !void {
+ var data: Data = .{ .no_op = .{} };
+ _ = data;
+ var o = Data.decode(&[_]u8{});
+ try expectEqual(Type.no_op, o);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "union enum type gets a separate scope" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const U = union(enum) {
+ a: u8,
+ const foo = 1;
+ };
+
+ fn doTheTest() !void {
+ try expect(!@hasDecl(Tag(U), "foo"));
+ }
+ };
+
+ try S.doTheTest();
+}
+
+test "global variable struct contains union initialized to non-most-aligned field" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const T = struct {
+ const U = union(enum) {
+ a: i32,
+ b: f64,
+ };
+
+ const S = struct {
+ u: U,
+ };
+
+ var s: S = .{
+ .u = .{
+ .a = 3,
+ },
+ };
+ };
+
+ T.s.u.a += 1;
+ try expect(T.s.u.a == 4);
+}
diff --git a/test/behavior/union_stage1.zig b/test/behavior/union_stage1.zig
deleted file mode 100644
index b71a7ac25d..0000000000
--- a/test/behavior/union_stage1.zig
+++ /dev/null
@@ -1,421 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const expectEqual = std.testing.expectEqual;
-const Tag = std.meta.Tag;
-
-const MultipleChoice2 = union(enum(u32)) {
- Unspecified1: i32,
- A: f32 = 20,
- Unspecified2: void,
- B: bool = 40,
- Unspecified3: i32,
- C: i8 = 60,
- Unspecified4: void,
- D: void = 1000,
- Unspecified5: i32,
-};
-
-test "union(enum(u32)) with specified and unspecified tag values" {
- comptime try expect(Tag(Tag(MultipleChoice2)) == u32);
- try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
- comptime try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
-}
-
-fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void {
- try expect(@enumToInt(@as(Tag(MultipleChoice2), x)) == 60);
- try expect(1123 == switch (x) {
- MultipleChoice2.A => 1,
- MultipleChoice2.B => 2,
- MultipleChoice2.C => |v| @as(i32, 1000) + v,
- MultipleChoice2.D => 4,
- MultipleChoice2.Unspecified1 => 5,
- MultipleChoice2.Unspecified2 => 6,
- MultipleChoice2.Unspecified3 => 7,
- MultipleChoice2.Unspecified4 => 8,
- MultipleChoice2.Unspecified5 => 9,
- });
-}
-
-test "switch on union with only 1 field" {
- var r: PartialInst = undefined;
- r = PartialInst.Compiled;
- switch (r) {
- PartialInst.Compiled => {
- var z: PartialInstWithPayload = undefined;
- z = PartialInstWithPayload{ .Compiled = 1234 };
- switch (z) {
- PartialInstWithPayload.Compiled => |x| {
- try expect(x == 1234);
- return;
- },
- }
- },
- }
- unreachable;
-}
-
-const PartialInst = union(enum) {
- Compiled,
-};
-
-const PartialInstWithPayload = union(enum) {
- Compiled: i32,
-};
-
-test "union with only 1 field casted to its enum type which has enum value specified" {
- const Literal = union(enum) {
- Number: f64,
- Bool: bool,
- };
-
- const ExprTag = enum(comptime_int) {
- Literal = 33,
- };
-
- const Expr = union(ExprTag) {
- Literal: Literal,
- };
-
- var e = Expr{ .Literal = Literal{ .Bool = true } };
- comptime try expect(Tag(ExprTag) == comptime_int);
- var t = @as(ExprTag, e);
- try expect(t == Expr.Literal);
- try expect(@enumToInt(t) == 33);
- comptime try expect(@enumToInt(t) == 33);
-}
-
-test "@enumToInt works on unions" {
- const Bar = union(enum) {
- A: bool,
- B: u8,
- C,
- };
-
- const a = Bar{ .A = true };
- var b = Bar{ .B = undefined };
- var c = Bar.C;
- try expect(@enumToInt(a) == 0);
- try expect(@enumToInt(b) == 1);
- try expect(@enumToInt(c) == 2);
-}
-
-const Attribute = union(enum) {
- A: bool,
- B: u8,
-};
-
-fn setAttribute(attr: Attribute) void {
- _ = attr;
-}
-
-fn Setter(attr: Attribute) type {
- return struct {
- fn set() void {
- setAttribute(attr);
- }
- };
-}
-
-test "comptime union field value equality" {
- const a0 = Setter(Attribute{ .A = false });
- const a1 = Setter(Attribute{ .A = true });
- const a2 = Setter(Attribute{ .A = false });
-
- const b0 = Setter(Attribute{ .B = 5 });
- const b1 = Setter(Attribute{ .B = 9 });
- const b2 = Setter(Attribute{ .B = 5 });
-
- try expect(a0 == a0);
- try expect(a1 == a1);
- try expect(a0 == a2);
-
- try expect(b0 == b0);
- try expect(b1 == b1);
- try expect(b0 == b2);
-
- try expect(a0 != b0);
- try expect(a0 != a1);
- try expect(b0 != b1);
-}
-
-test "return union init with void payload" {
- const S = struct {
- fn entry() !void {
- try expect(func().state == State.one);
- }
- const Outer = union(enum) {
- state: State,
- };
- const State = union(enum) {
- one: void,
- two: u32,
- };
- fn func() Outer {
- return Outer{ .state = State{ .one = {} } };
- }
- };
- try S.entry();
- comptime try S.entry();
-}
-
-test "@unionInit can modify a union type" {
- const UnionInitEnum = union(enum) {
- Boolean: bool,
- Byte: u8,
- };
-
- var value: UnionInitEnum = undefined;
-
- value = @unionInit(UnionInitEnum, "Boolean", true);
- try expect(value.Boolean == true);
- value.Boolean = false;
- try expect(value.Boolean == false);
-
- value = @unionInit(UnionInitEnum, "Byte", 2);
- try expect(value.Byte == 2);
- value.Byte = 3;
- try expect(value.Byte == 3);
-}
-
-test "@unionInit can modify a pointer value" {
- const UnionInitEnum = union(enum) {
- Boolean: bool,
- Byte: u8,
- };
-
- var value: UnionInitEnum = undefined;
- var value_ptr = &value;
-
- value_ptr.* = @unionInit(UnionInitEnum, "Boolean", true);
- try expect(value.Boolean == true);
-
- value_ptr.* = @unionInit(UnionInitEnum, "Byte", 2);
- try expect(value.Byte == 2);
-}
-
-test "union no tag with struct member" {
- const Struct = struct {};
- const Union = union {
- s: Struct,
- pub fn foo(self: *@This()) void {
- _ = self;
- }
- };
- var u = Union{ .s = Struct{} };
- u.foo();
-}
-
-test "union with comptime_int tag" {
- const Union = union(enum(comptime_int)) {
- X: u32,
- Y: u16,
- Z: u8,
- };
- comptime try expect(Tag(Tag(Union)) == comptime_int);
-}
-
-test "extern union doesn't trigger field check at comptime" {
- const U = extern union {
- x: u32,
- y: u8,
- };
-
- const x = U{ .x = 0x55AAAA55 };
- comptime try expect(x.y == 0x55);
-}
-
-test "anonymous union literal syntax" {
- const S = struct {
- const Number = union {
- int: i32,
- float: f64,
- };
-
- fn doTheTest() !void {
- var i: Number = .{ .int = 42 };
- var f = makeNumber();
- try expect(i.int == 42);
- try expect(f.float == 12.34);
- }
-
- fn makeNumber() Number {
- return .{ .float = 12.34 };
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "function call result coerces from tagged union to the tag" {
- const S = struct {
- const Arch = union(enum) {
- One,
- Two: usize,
- };
-
- const ArchTag = Tag(Arch);
-
- fn doTheTest() !void {
- var x: ArchTag = getArch1();
- try expect(x == .One);
-
- var y: ArchTag = getArch2();
- try expect(y == .Two);
- }
-
- pub fn getArch1() Arch {
- return .One;
- }
-
- pub fn getArch2() Arch {
- return .{ .Two = 99 };
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "cast from anonymous struct to union" {
- const S = struct {
- const U = union(enum) {
- A: u32,
- B: []const u8,
- C: void,
- };
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = .{ .A = 123 };
- const t1 = .{ .B = "foo" };
- const t2 = .{ .C = {} };
- const t3 = .{ .A = y };
- const x0: U = t0;
- var x1: U = t1;
- const x2: U = t2;
- var x3: U = t3;
- try expect(x0.A == 123);
- try expect(std.mem.eql(u8, x1.B, "foo"));
- try expect(x2 == .C);
- try expect(x3.A == y);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "cast from pointer to anonymous struct to pointer to union" {
- const S = struct {
- const U = union(enum) {
- A: u32,
- B: []const u8,
- C: void,
- };
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = &.{ .A = 123 };
- const t1 = &.{ .B = "foo" };
- const t2 = &.{ .C = {} };
- const t3 = &.{ .A = y };
- const x0: *const U = t0;
- var x1: *const U = t1;
- const x2: *const U = t2;
- var x3: *const U = t3;
- try expect(x0.A == 123);
- try expect(std.mem.eql(u8, x1.B, "foo"));
- try expect(x2.* == .C);
- try expect(x3.A == y);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "switching on non exhaustive union" {
- const S = struct {
- const E = enum(u8) {
- a,
- b,
- _,
- };
- const U = union(E) {
- a: i32,
- b: u32,
- };
- fn doTheTest() !void {
- var a = U{ .a = 2 };
- switch (a) {
- .a => |val| try expect(val == 2),
- .b => unreachable,
- }
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "containers with single-field enums" {
- const S = struct {
- const A = union(enum) { f1 };
- const B = union(enum) { f1: void };
- const C = struct { a: A };
- const D = struct { a: B };
-
- fn doTheTest() !void {
- var array1 = [1]A{A{ .f1 = {} }};
- var array2 = [1]B{B{ .f1 = {} }};
- try expect(array1[0] == .f1);
- try expect(array2[0] == .f1);
-
- var struct1 = C{ .a = A{ .f1 = {} } };
- var struct2 = D{ .a = B{ .f1 = {} } };
- try expect(struct1.a == .f1);
- try expect(struct2.a == .f1);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "@unionInit on union w/ tag but no fields" {
- const S = struct {
- const Type = enum(u8) { no_op = 105 };
-
- const Data = union(Type) {
- no_op: void,
-
- pub fn decode(buf: []const u8) Data {
- _ = buf;
- return @unionInit(Data, "no_op", {});
- }
- };
-
- comptime {
- std.debug.assert(@sizeOf(Data) != 0);
- }
-
- fn doTheTest() !void {
- var data: Data = .{ .no_op = .{} };
- _ = data;
- var o = Data.decode(&[_]u8{});
- try expectEqual(Type.no_op, o);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "union enum type gets a separate scope" {
- const S = struct {
- const U = union(enum) {
- a: u8,
- const foo = 1;
- };
-
- fn doTheTest() !void {
- try expect(!@hasDecl(Tag(U), "foo"));
- }
- };
-
- try S.doTheTest();
-}
From e139c41fd8955f873615b2c2434d162585c0e44c Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Thu, 10 Feb 2022 11:27:38 +0100
Subject: [PATCH 0121/2031] stage2: handle truncate to signed non-pow-two
integers
---
src/arch/x86_64/CodeGen.zig | 47 +++++++++++++++++++++++++++----------
test/behavior/basic.zig | 1 -
2 files changed, 34 insertions(+), 14 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index d78245b094..09cd09b12d 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -958,16 +958,17 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
}
- const dst_mcv = blk: {
- const reg = switch (operand) {
- .register => |reg| reg,
- else => inner: {
- const reg = try self.register_manager.allocReg(inst);
- try self.genSetReg(src_ty, reg, operand);
- break :inner reg;
- },
- };
- break :blk .{ .register = registerAlias(reg, @intCast(u32, dst_ty_size)) };
+ operand.freezeIfRegister(&self.register_manager);
+ defer operand.unfreezeIfRegister(&self.register_manager);
+
+ const reg: Register = blk: {
+ if (operand.isRegister()) {
+ if (self.reuseOperand(inst, ty_op.operand, 0, operand)) {
+ break :blk operand.register;
+ }
+ }
+ const mcv = try self.copyToNewRegister(inst, src_ty, operand);
+ break :blk mcv.register.to64();
};
// when truncating a `u16` to `u5`, for example, those top 3 bits in the result
@@ -975,11 +976,31 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const dst_bit_size = dst_ty.bitSize(self.target.*);
const is_power_of_two = (dst_bit_size & (dst_bit_size - 1)) == 0;
if (!is_power_of_two or dst_bit_size < 8) {
- const mask = (~@as(u64, 0)) >> @intCast(u6, (64 - dst_ty.bitSize(self.target.*)));
- try self.genBinMathOpMir(.@"and", dst_ty, dst_mcv, .{ .immediate = mask });
+ const shift = @intCast(u6, 64 - dst_ty.bitSize(self.target.*));
+ const mask = (~@as(u64, 0)) >> shift;
+ try self.genBinMathOpMir(.@"and", Type.usize, .{ .register = reg }, .{ .immediate = mask });
+
+ if (src_ty.intInfo(self.target.*).signedness == .signed) {
+ _ = try self.addInst(.{
+ .tag = .sal,
+ .ops = (Mir.Ops{
+ .reg1 = reg,
+ .flags = 0b10,
+ }).encode(),
+ .data = .{ .imm = shift },
+ });
+ _ = try self.addInst(.{
+ .tag = .sar,
+ .ops = (Mir.Ops{
+ .reg1 = reg,
+ .flags = 0b10,
+ }).encode(),
+ .data = .{ .imm = shift },
+ });
+ }
}
- return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
+ return self.finishAir(inst, .{ .register = reg }, .{ ty_op.operand, .none, .none });
}
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 13d7e833d5..d87d04e246 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -26,7 +26,6 @@ fn testTruncate(x: u32) u8 {
test "truncate to non-power-of-two integers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try testTrunc(u32, u1, 0b10101, 0b1);
try testTrunc(u32, u1, 0b10110, 0b0);
From 0e2fcab334083d3cbc786e891be6c97e9fd81595 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 10 Feb 2022 21:06:16 +0100
Subject: [PATCH 0122/2031] wasm: Implement 'field_ptr' constants
This implements the `field_ptr` value for pointers. As the value only provides us with the index,
we must calculate the offset from the container type using said index. (i.e. the offset from a struct field at index 2).
Besides this, small miscellaneous fixes/updates were done to get remaining behavior tests passing:
- We start the function table index at 1, so unresolved function pointers can be null-checked properly.
- Implement genTypedValue for floats up to f64.
- Fix zero-sized arguments by only creating `args` for non-zero-sized types.
- lowerConstant now works for all decl_ref's.
- lowerConstant properly lowers optional pointers, so `null` pointers are lowered to `0`.
---
src/arch/wasm/CodeGen.zig | 95 ++++++++++++++++++++++++++++-----------
src/link/Wasm.zig | 6 +--
2 files changed, 73 insertions(+), 28 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index b0c24be03b..bcad7cac19 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1106,6 +1106,20 @@ pub const DeclGen = struct {
}
return Result{ .appended = {} };
},
+ .Float => {
+ const float_bits = ty.floatBits(self.target());
+ if (float_bits > 64) {
+ return self.fail("Wasm TODO: Implement f80 and f128", .{});
+ }
+
+ switch (float_bits) {
+ 16, 32 => try writer.writeIntLittle(u32, @bitCast(u32, val.toFloat(f32))),
+ 64 => try writer.writeIntLittle(u64, @bitCast(u64, val.toFloat(f64))),
+ else => unreachable,
+ }
+
+ return Result{ .appended = {} };
+ },
.Enum => {
var int_buffer: Value.Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &int_buffer);
@@ -1334,10 +1348,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
defer self.gpa.free(param_types);
fn_ty.fnParamTypes(param_types);
var result: CallWValues = .{
- .args = try self.gpa.alloc(WValue, param_types.len),
+ .args = &.{},
.return_value = .none,
};
- errdefer self.gpa.free(result.args);
+ var args = std.ArrayList(WValue).init(self.gpa);
+ defer args.deinit();
+
const ret_ty = fn_ty.fnReturnType();
// Check if we store the result as a pointer to the stack rather than
// by value
@@ -1350,18 +1366,18 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
switch (cc) {
.Naked => return result,
.Unspecified, .C => {
- for (param_types) |ty, ty_index| {
+ for (param_types) |ty| {
if (!ty.hasRuntimeBits()) {
- result.args[ty_index] = .{ .none = {} };
continue;
}
- result.args[ty_index] = .{ .local = self.local_index };
+ try args.append(.{ .local = self.local_index });
self.local_index += 1;
}
},
else => return self.fail("TODO implement function parameters for cc '{}' on wasm", .{cc}),
}
+ result.args = args.toOwnedSlice();
return result;
}
@@ -2060,6 +2076,26 @@ fn airWrapBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
if (val.isUndefDeep()) return self.emitUndefined(ty);
+ if (val.castTag(.decl_ref)) |decl_ref| {
+ const decl = decl_ref.data;
+ decl.markAlive();
+ const target_sym_index = decl.link.wasm.sym_index;
+ if (ty.isSlice()) {
+ var slice_len: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = val.sliceLen(),
+ };
+ var slice_val: Value.Payload.Slice = .{
+ .base = .{ .tag = .slice },
+ .data = .{ .ptr = val.slicePtr(), .len = Value.initPayload(&slice_len.base) },
+ };
+ return self.lowerConstant(Value.initPayload(&slice_val.base), ty);
+ } else if (decl.ty.zigTypeTag() == .Fn) {
+ try self.bin_file.addTableFunction(target_sym_index);
+ return WValue{ .function_index = target_sym_index };
+ } else return WValue{ .memory = target_sym_index };
+ }
+
switch (ty.zigTypeTag()) {
.Int => {
const int_info = ty.intInfo(self.target);
@@ -2084,25 +2120,6 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
else => unreachable,
},
.Pointer => switch (val.tag()) {
- .decl_ref => {
- const decl = val.castTag(.decl_ref).?.data;
- decl.markAlive();
- const target_sym_index = decl.link.wasm.sym_index;
- if (ty.isSlice()) {
- var slice_len: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = val.sliceLen(),
- };
- var slice_val: Value.Payload.Slice = .{
- .base = .{ .tag = .slice },
- .data = .{ .ptr = val.slicePtr(), .len = Value.initPayload(&slice_len.base) },
- };
- return self.lowerConstant(Value.initPayload(&slice_val.base), ty);
- } else if (decl.ty.zigTypeTag() == .Fn) {
- try self.bin_file.addTableFunction(target_sym_index);
- return WValue{ .function_index = target_sym_index };
- } else return WValue{ .memory = target_sym_index };
- },
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
const index = elem_ptr.index;
@@ -2114,6 +2131,27 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
.offset = @intCast(u32, offset),
} };
},
+ .field_ptr => {
+ const field_ptr = val.castTag(.field_ptr).?.data;
+ const container = field_ptr.container_ptr;
+ const parent_ptr = try self.lowerConstant(container, ty);
+
+ const offset = switch (container.tag()) {
+ .decl_ref => blk: {
+ const decl_ref = container.castTag(.decl_ref).?.data;
+ if (decl_ref.ty.castTag(.@"struct")) |_| {
+ const offset = decl_ref.ty.structFieldOffset(field_ptr.field_index, self.target);
+ break :blk offset;
+ }
+ return self.fail("Wasm TODO: field_ptr decl_ref for type '{}'", .{decl_ref.ty});
+ },
+ else => |tag| return self.fail("Wasm TODO: Implement field_ptr for value tag: '{s}'", .{tag}),
+ };
+ return WValue{ .memory_offset = .{
+ .pointer = parent_ptr.memory,
+ .offset = @intCast(u32, offset),
+ } };
+ },
.int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt()) },
.zero, .null_value => return WValue{ .imm32 = 0 },
else => return self.fail("Wasm TODO: lowerConstant for other const pointer tag {s}", .{val.tag()}),
@@ -2160,7 +2198,14 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
},
.Optional => if (ty.isPtrLikeOptional()) {
var buf: Type.Payload.ElemType = undefined;
- return self.lowerConstant(val, ty.optionalChild(&buf));
+ const pl_ty = ty.optionalChild(&buf);
+ if (val.castTag(.opt_payload)) |payload| {
+ return self.lowerConstant(payload.data, pl_ty);
+ } else if (val.isNull()) {
+ return WValue{ .imm32 = 0 };
+ } else {
+ return self.lowerConstant(val, pl_ty);
+ }
} else {
const is_pl = val.tag() == .opt_payload;
return WValue{ .imm32 = if (is_pl) @as(u32, 1) else 0 };
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index d62f3a4201..81d77d5b66 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -428,7 +428,7 @@ pub fn addTableFunction(self: *Wasm, symbol_index: u32) !void {
fn mapFunctionTable(self: *Wasm) void {
var it = self.function_table.valueIterator();
- var index: u32 = 0;
+ var index: u32 = 1;
while (it.next()) |value_ptr| : (index += 1) {
value_ptr.* = index;
}
@@ -821,7 +821,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
try leb.writeULEB128(writer, wasm.reftype(.funcref));
try emitLimits(writer, .{
- .min = @intCast(u32, self.function_table.count()),
+ .min = @intCast(u32, self.function_table.count()) + 1,
.max = null,
});
@@ -931,7 +931,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
var flags: u32 = 0x2; // Yes we have a table
try leb.writeULEB128(writer, flags);
try leb.writeULEB128(writer, @as(u32, 0)); // index of that table. TODO: Store synthetic symbols
- try emitInit(writer, .{ .i32_const = 0 });
+ try emitInit(writer, .{ .i32_const = 1 }); // We start at index 1, so unresolved function pointers are invalid
try leb.writeULEB128(writer, @as(u8, 0));
try leb.writeULEB128(writer, @intCast(u32, self.function_table.count()));
var symbol_it = self.function_table.keyIterator();
From 9c6d416bec10b9edd88545287133921274af1d2f Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 10 Feb 2022 21:30:26 +0100
Subject: [PATCH 0123/2031] Activate passing behavior tests
This moves the single bugs behavior tests to the outer branch and disables the test cases
for all non-passing backends.
For the larger files, we move it up a single branch and disable it for the c backend.
All test cases that do pass for the c backend however, are enabled.
---
test/behavior.zig | 8 ++++----
test/behavior/bugs/2578.zig | 6 ++++++
test/behavior/bugs/3007.zig | 5 +++++
test/behavior/cast_llvm.zig | 24 ++++++++++++++++++++++++
test/behavior/fn.zig | 5 +++++
5 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/test/behavior.zig b/test/behavior.zig
index 3e4ae36bfc..a1d8e9bef9 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -23,6 +23,8 @@ test {
_ = @import("behavior/bugs/1914.zig");
_ = @import("behavior/bugs/2006.zig");
_ = @import("behavior/bugs/2346.zig");
+ _ = @import("behavior/bugs/2578.zig");
+ _ = @import("behavior/bugs/3007.zig");
_ = @import("behavior/bugs/3112.zig");
_ = @import("behavior/bugs/3367.zig");
_ = @import("behavior/bugs/6850.zig");
@@ -58,9 +60,11 @@ test {
_ = @import("behavior/bugs/4954.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig");
+ _ = @import("behavior/cast_llvm.zig");
_ = @import("behavior/defer.zig");
_ = @import("behavior/enum.zig");
_ = @import("behavior/error.zig");
+ _ = @import("behavior/fn.zig");
_ = @import("behavior/for.zig");
_ = @import("behavior/generics.zig");
_ = @import("behavior/if.zig");
@@ -93,14 +97,10 @@ test {
if (builtin.zig_backend != .stage2_c) {
// Tests that pass for stage1 and the llvm backend.
_ = @import("behavior/atomics.zig");
- _ = @import("behavior/bugs/2578.zig");
- _ = @import("behavior/bugs/3007.zig");
_ = @import("behavior/bugs/9584.zig");
- _ = @import("behavior/cast_llvm.zig");
_ = @import("behavior/error_llvm.zig");
_ = @import("behavior/eval.zig");
_ = @import("behavior/floatop.zig");
- _ = @import("behavior/fn.zig");
_ = @import("behavior/math.zig");
_ = @import("behavior/maximum_minimum.zig");
_ = @import("behavior/merge_error_sets.zig");
diff --git a/test/behavior/bugs/2578.zig b/test/behavior/bugs/2578.zig
index b27d73415e..15f5bf0e53 100644
--- a/test/behavior/bugs/2578.zig
+++ b/test/behavior/bugs/2578.zig
@@ -1,3 +1,5 @@
+const builtin = @import("builtin");
+
const Foo = struct {
y: u8,
};
@@ -10,5 +12,9 @@ fn bar(pointer: ?*anyopaque) void {
}
test "fixed" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
bar(t);
}
diff --git a/test/behavior/bugs/3007.zig b/test/behavior/bugs/3007.zig
index c08be3676a..0b3cbdc56d 100644
--- a/test/behavior/bugs/3007.zig
+++ b/test/behavior/bugs/3007.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const builtin = @import("builtin");
const Foo = struct {
free: bool,
@@ -18,6 +19,10 @@ fn get_foo() Foo.FooError!*Foo {
}
test "fixed" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
default_foo = get_foo() catch null; // This Line
try std.testing.expect(!default_foo.?.free);
}
diff --git a/test/behavior/cast_llvm.zig b/test/behavior/cast_llvm.zig
index 79c2243a50..6f9b77b8f2 100644
--- a/test/behavior/cast_llvm.zig
+++ b/test/behavior/cast_llvm.zig
@@ -6,6 +6,8 @@ const maxInt = std.math.maxInt;
const native_endian = builtin.target.cpu.arch.endian();
test "pointer reinterpret const float to int" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
// The hex representation is 0x3fe3333333333303.
const float: f64 = 5.99999999999994648725e-01;
const float_ptr = &float;
@@ -18,6 +20,8 @@ test "pointer reinterpret const float to int" {
}
test "@floatToInt" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
try testFloatToInts();
comptime try testFloatToInts();
}
@@ -33,6 +37,8 @@ fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) !void {
}
test "implicit cast from [*]T to ?*anyopaque" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
var a = [_]u8{ 3, 2, 1 };
var runtime_zero: usize = 0;
incrementVoidPtrArray(a[runtime_zero..].ptr, 3);
@@ -49,6 +55,7 @@ fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
test "compile time int to ptr of function" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try foobar(FUNCTION_CONSTANT);
}
@@ -61,6 +68,8 @@ fn foobar(func: PFN_void) !void {
}
test "implicit ptr to *anyopaque" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
var a: u32 = 1;
var ptr: *align(@alignOf(u32)) anyopaque = &a;
var b: *u32 = @ptrCast(*u32, ptr);
@@ -87,6 +96,7 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
}
test "peer type resolution: [0]u8 and []const u8" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
comptime {
@@ -103,6 +113,7 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
comptime try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
}
@@ -112,6 +123,7 @@ fn castToOptionalSlice() ?[]const u8 {
}
test "cast u128 to f128 and back" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime try testCast128();
try testCast128();
}
@@ -129,6 +141,7 @@ fn cast128Float(x: u128) f128 {
}
test "implicit cast from *[N]T to ?[*]T" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var x: ?[*]u16 = null;
var y: [4]u16 = [4]u16{ 0, 1, 2, 3 };
@@ -140,6 +153,7 @@ test "implicit cast from *[N]T to ?[*]T" {
}
test "implicit cast from *T to ?*anyopaque" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var a: u8 = 1;
incrementVoidPtrValue(&a);
try std.testing.expect(a == 2);
@@ -150,6 +164,7 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
}
test "implicit cast *[0]T to E![]const u8" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var x = @as(anyerror![]const u8, &[0]u8{});
try expect((x catch unreachable).len == 0);
}
@@ -160,11 +175,13 @@ test "cast from array reference to fn: comptime fn ptr" {
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
test "cast from array reference to fn: runtime fn ptr" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var f = @ptrCast(*const fn () callconv(.C) void, &global_array);
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
test "*const [N]null u8 to ?[]const u8" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var a = "Hello";
@@ -196,6 +213,7 @@ test "cast between [*c]T and ?[*:0]T on fn parameter" {
var global_struct: struct { f0: usize } = undefined;
test "assignment to optional pointer result loc" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct };
try expect(foo.ptr.? == @ptrCast(*anyopaque, &global_struct));
}
@@ -217,6 +235,8 @@ fn boolToStr(b: bool) []const u8 {
}
test "cast f16 to wider types" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var x: f16 = 1234.0;
@@ -230,6 +250,9 @@ test "cast f16 to wider types" {
}
test "cast f128 to narrower types" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
const S = struct {
fn doTheTest() !void {
var x: f128 = 1234.0;
@@ -243,6 +266,7 @@ test "cast f128 to narrower types" {
}
test "peer type resolution: unreachable, null, slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest(num: usize, word: []const u8) !void {
const result = switch (num) {
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index ebbbfda67b..17a3d9a93b 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -75,6 +75,7 @@ test "return inner function which references comptime variable of outer function
}
test "discard the result of a function that returns a struct" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn entry() void {
_ = func();
@@ -94,6 +95,7 @@ test "discard the result of a function that returns a struct" {
}
test "inline function call that calls optional function pointer, return pointer at callsite interacts correctly with callsite return type" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const S = struct {
@@ -139,6 +141,7 @@ fn fnWithUnreachable() noreturn {
}
test "extern struct with stdcallcc fn pointer" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const S = extern struct {
@@ -252,6 +255,7 @@ test "implicit cast fn call result to optional in field result" {
}
test "void parameters" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try voidFun(1, void{}, 2, {});
}
fn voidFun(a: i32, b: void, c: i32, d: void) !void {
@@ -306,6 +310,7 @@ fn numberLiteralArg(a: anytype) !void {
}
test "function call with anon list literal" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try consumeVec(.{ 9, 8, 7 });
From 08e2f5d08390d41b58c92707693385e5e2968fc8 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Thu, 10 Feb 2022 13:01:23 +0100
Subject: [PATCH 0124/2031] codegen: handle lowering of const slice pointers
---
src/arch/x86_64/CodeGen.zig | 87 ++++++++++++++++++++++++++++++-------
src/codegen.zig | 1 -
2 files changed, 71 insertions(+), 17 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 09cd09b12d..c0bb233499 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -3514,8 +3514,14 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
else => return self.fail("TODO implement args on stack for {} with abi size > 8", .{mcv}),
}
},
+ .embedded_in_code => {
+ if (abi_size <= 8) {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
+ }
+ return self.fail("TODO implement args on stack for {} with abi size > 8", .{mcv});
+ },
.memory,
- .embedded_in_code,
.direct_load,
.got_load,
=> {
@@ -3523,7 +3529,58 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
}
- return self.fail("TODO implement memcpy for setting args on stack from {}", .{mcv});
+
+ self.register_manager.freezeRegs(&.{ .rax, .rcx });
+ defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx });
+
+ const addr_reg: Register = blk: {
+ switch (mcv) {
+ .got_load,
+ .direct_load,
+ => |sym_index| {
+ const flags: u2 = switch (mcv) {
+ .got_load => 0b00,
+ .direct_load => 0b01,
+ else => unreachable,
+ };
+ const addr_reg = try self.register_manager.allocReg(null);
+ _ = try self.addInst(.{
+ .tag = .lea_pie,
+ .ops = (Mir.Ops{
+ .reg1 = addr_reg.to64(),
+ .flags = flags,
+ }).encode(),
+ .data = .{ .linker_sym_index = sym_index },
+ });
+ break :blk addr_reg;
+ },
+ .memory => |addr| {
+ const addr_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr });
+ break :blk addr_reg;
+ },
+ else => unreachable,
+ }
+ };
+
+ self.register_manager.freezeRegs(&.{addr_reg});
+ defer self.register_manager.unfreezeRegs(&.{addr_reg});
+
+ const regs = try self.register_manager.allocRegs(2, .{ null, null });
+ const count_reg = regs[0];
+ const tmp_reg = regs[1];
+
+ try self.register_manager.getReg(.rax, null);
+ try self.register_manager.getReg(.rcx, null);
+
+ // TODO allow for abi_size to be u64
+ try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) });
+ try self.genInlineMemcpy(
+ -(stack_offset + @intCast(i32, abi_size)),
+ .rsp,
+ addr_reg.to64(),
+ count_reg.to64(),
+ tmp_reg.to8(),
+ );
},
.register => |reg| {
_ = try self.addInst(.{
@@ -4488,6 +4545,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
}
fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
+ log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty, tv.val });
const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
@@ -4520,23 +4578,20 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
- const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
- const slice_len = typed_value.val.sliceLen();
- // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
- // the Sema code needs to use anonymous Decls or alloca instructions to store data.
- const ptr_imm = ptr_mcv.memory;
- _ = slice_len;
- _ = ptr_imm;
- // We need more general support for const data being stored in memory to make this work.
- return self.fail("TODO codegen for const slices", .{});
+ return self.lowerUnnamedConst(typed_value);
},
else => {
- if (typed_value.val.tag() == .int_u64) {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
+ switch (typed_value.val.tag()) {
+ .int_u64 => {
+ return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
+ },
+ .slice => {
+ return self.lowerUnnamedConst(typed_value);
+ },
+ else => {
+ return self.fail("TODO codegen more kinds of const pointers: {}", .{typed_value.val.tag()});
+ },
}
- return self.fail("TODO codegen more kinds of const pointers: {}", .{typed_value.val.tag()});
},
},
.Int => {
diff --git a/src/codegen.zig b/src/codegen.zig
index d1c249d99d..389f38a020 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -230,7 +230,6 @@ pub fn generateSymbol(
return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output);
},
.slice => {
- // TODO populate .debug_info for the slice
const slice = typed_value.val.castTag(.slice).?.data;
// generate ptr
From b9b1ab024063105a9adfe3828692867c91015dc6 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Thu, 10 Feb 2022 14:41:07 +0100
Subject: [PATCH 0125/2031] elf: store pointer relocations indexed by
containing atom
In `getDeclVAddr`, it may happen that the target `Decl` has not
been allocated space in virtual memory. In this case, we store a
relocation in the linker-global table which we will iterate over
when flushing the module, and fill in any missing address in the
final binary. Note that for optimisation, if the address was resolved
at the time of a call to `getDeclVAddr`, we skip relocating this
atom.
This commit also adds the glue code for lowering const slices in
the ARM backend.
---
src/arch/arm/CodeGen.zig | 25 ++++-----
src/codegen.zig | 30 +++++------
src/link.zig | 14 +++--
src/link/Coff.zig | 8 +--
src/link/Elf.zig | 113 ++++++++++++++++++++++++++++++++-------
src/link/MachO.zig | 53 +++++++++---------
src/link/Plan9.zig | 6 ++-
7 files changed, 163 insertions(+), 86 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 711e2a96f0..2c60027d97 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -3931,23 +3931,20 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
- const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
- const slice_len = typed_value.val.sliceLen();
- // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
- // the Sema code needs to use anonymous Decls or alloca instructions to store data.
- const ptr_imm = ptr_mcv.memory;
- _ = slice_len;
- _ = ptr_imm;
- // We need more general support for const data being stored in memory to make this work.
- return self.fail("TODO codegen for const slices", .{});
+ return self.lowerUnnamedConst(typed_value);
},
else => {
- if (typed_value.val.tag() == .int_u64) {
- return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
+ switch (typed_value.val.tag()) {
+ .int_u64 => {
+ return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
+ },
+ .slice => {
+ return self.lowerUnnamedConst(typed_value);
+ },
+ else => {
+ return self.fail("TODO codegen more kinds of const pointers", .{});
+ },
}
- return self.fail("TODO codegen more kinds of const pointers", .{});
},
},
.Int => {
diff --git a/src/codegen.zig b/src/codegen.zig
index 389f38a020..fd4321fee9 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -142,6 +142,7 @@ pub fn generateFunction(
pub fn generateSymbol(
bin_file: *link.File,
+ parent_atom_index: u32,
src_loc: Module.SrcLoc,
typed_value: TypedValue,
code: *std.ArrayList(u8),
@@ -177,7 +178,7 @@ pub fn generateSymbol(
if (typed_value.ty.sentinel()) |sentinel| {
try code.ensureUnusedCapacity(payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
- switch (try generateSymbol(bin_file, src_loc, .{
+ switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
}, code, debug_output)) {
@@ -197,7 +198,7 @@ pub fn generateSymbol(
const elem_vals = typed_value.val.castTag(.array).?.data;
const elem_ty = typed_value.ty.elemType();
for (elem_vals) |elem_val| {
- switch (try generateSymbol(bin_file, src_loc, .{
+ switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = elem_ty,
.val = elem_val,
}, code, debug_output)) {
@@ -223,11 +224,11 @@ pub fn generateSymbol(
.Pointer => switch (typed_value.val.tag()) {
.variable => {
const decl = typed_value.val.castTag(.variable).?.data.owner_decl;
- return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output);
+ return lowerDeclRef(bin_file, parent_atom_index, src_loc, typed_value, decl, code, debug_output);
},
.decl_ref => {
const decl = typed_value.val.castTag(.decl_ref).?.data;
- return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output);
+ return lowerDeclRef(bin_file, parent_atom_index, src_loc, typed_value, decl, code, debug_output);
},
.slice => {
const slice = typed_value.val.castTag(.slice).?.data;
@@ -235,7 +236,7 @@ pub fn generateSymbol(
// generate ptr
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf);
- switch (try generateSymbol(bin_file, src_loc, .{
+ switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = slice_ptr_field_type,
.val = slice.ptr,
}, code, debug_output)) {
@@ -247,7 +248,7 @@ pub fn generateSymbol(
}
// generate length
- switch (try generateSymbol(bin_file, src_loc, .{
+ switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = Type.initTag(.usize),
.val = slice.len,
}, code, debug_output)) {
@@ -391,7 +392,7 @@ pub fn generateSymbol(
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
- switch (try generateSymbol(bin_file, src_loc, .{
+ switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = field_ty,
.val = field_val,
}, code, debug_output)) {
@@ -446,6 +447,7 @@ pub fn generateSymbol(
fn lowerDeclRef(
bin_file: *link.File,
+ parent_atom_index: u32,
src_loc: Module.SrcLoc,
typed_value: TypedValue,
decl: *Module.Decl,
@@ -456,7 +458,7 @@ fn lowerDeclRef(
// generate ptr
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf);
- switch (try generateSymbol(bin_file, src_loc, .{
+ switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = slice_ptr_field_type,
.val = typed_value.val,
}, code, debug_output)) {
@@ -472,7 +474,7 @@ fn lowerDeclRef(
.base = .{ .tag = .int_u64 },
.data = typed_value.val.sliceLen(),
};
- switch (try generateSymbol(bin_file, src_loc, .{
+ switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = Type.initTag(.usize),
.val = Value.initPayload(&slice_len.base),
}, code, debug_output)) {
@@ -495,15 +497,7 @@ fn lowerDeclRef(
}
decl.markAlive();
- const vaddr = vaddr: {
- if (bin_file.cast(link.File.MachO)) |macho_file| {
- break :vaddr try macho_file.getDeclVAddrWithReloc(decl, code.items.len);
- }
- // TODO handle the dependency of this symbol on the decl's vaddr.
- // If the decl changes vaddr, then this symbol needs to get regenerated.
- break :vaddr bin_file.getDeclVAddr(decl);
- };
-
+ const vaddr = try bin_file.getDeclVAddr(decl, parent_atom_index, code.items.len);
const endian = target.cpu.arch.endian();
switch (ptr_width) {
16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian),
diff --git a/src/link.zig b/src/link.zig
index 56b88bffef..c5d14eb75a 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -684,12 +684,16 @@ pub const File = struct {
}
}
- pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
+ /// Get allocated `Decl`'s address in virtual memory.
+    /// The linker is passed information about the containing atom, `parent_atom_index`, and offset within its
+    /// memory buffer, `offset`, so that it can make a note of potential relocation sites, in case the
+    /// `Decl`'s address is not yet resolved, or the containing atom gets moved in virtual memory.
+ pub fn getDeclVAddr(base: *File, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
switch (base.tag) {
- .coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
- .elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
- .macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
- .plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl),
+ .coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
+ .elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
+ .macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
.c => unreachable,
.wasm => unreachable,
.spirv => unreachable,
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 2f500e6b91..32d4d38235 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -726,7 +726,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ const res = try codegen.generateSymbol(&self.base, 0, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl.val,
}, &code_buffer, .none);
@@ -751,7 +751,7 @@ fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []co
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
if (need_realloc) {
- const curr_vaddr = self.getDeclVAddr(decl);
+ const curr_vaddr = self.text_section_virtual_address + decl.link.coff.text_offset;
const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr });
if (vaddr != curr_vaddr) {
@@ -1465,7 +1465,9 @@ fn findLib(self: *Coff, arena: Allocator, name: []const u8) !?[]const u8 {
return null;
}
-pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
+pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
+ _ = parent_atom_index;
+ _ = offset;
assert(self.llvm_object == null);
return self.text_section_virtual_address + decl.link.coff.text_offset;
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 23bd5bb2dd..467bbeee54 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -145,6 +145,7 @@ decls: std.AutoHashMapUnmanaged(*Module.Decl, ?u16) = .{},
/// at present owned by Module.Decl.
/// TODO consolidate this.
managed_atoms: std.ArrayListUnmanaged(*TextBlock) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, *TextBlock) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -179,6 +180,18 @@ dbg_info_decl_free_list: std.AutoHashMapUnmanaged(*TextBlock, void) = .{},
dbg_info_decl_first: ?*TextBlock = null,
dbg_info_decl_last: ?*TextBlock = null,
+/// A table of relocations indexed by the `TextBlock` that owns them.
+/// Note that once we refactor `TextBlock`'s lifetime and ownership rules,
+/// this will be a table indexed by index into the list of Atoms.
+relocs: RelocTable = .{},
+
+const Reloc = struct {
+ target: u32,
+ offset: u64,
+ prev_vaddr: u64,
+};
+
+const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc));
const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*TextBlock));
/// When allocating, the ideal_capacity is calculated by
@@ -397,12 +410,36 @@ pub fn deinit(self: *Elf) void {
}
self.unnamed_const_atoms.deinit(self.base.allocator);
}
+
+ {
+ var it = self.relocs.valueIterator();
+ while (it.next()) |relocs| {
+ relocs.deinit(self.base.allocator);
+ }
+ self.relocs.deinit(self.base.allocator);
+ }
+
+ self.atom_by_index_table.deinit(self.base.allocator);
}
-pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl) u64 {
+pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
assert(self.llvm_object == null);
assert(decl.link.elf.local_sym_index != 0);
- return self.local_symbols.items[decl.link.elf.local_sym_index].st_value;
+
+ const target = decl.link.elf.local_sym_index;
+ const vaddr = self.local_symbols.items[target].st_value;
+ const atom = self.atom_by_index_table.get(parent_atom_index).?;
+ const gop = try self.relocs.getOrPut(self.base.allocator, atom);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ try gop.value_ptr.append(self.base.allocator, .{
+ .target = target,
+ .offset = offset,
+ .prev_vaddr = vaddr,
+ });
+
+ return vaddr;
}
fn getDebugLineProgramOff(self: Elf) u32 {
@@ -991,6 +1028,41 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
.p64 => 12,
};
+ {
+ var it = self.relocs.iterator();
+ while (it.next()) |entry| {
+ const atom = entry.key_ptr.*;
+ const relocs = entry.value_ptr.*;
+ const source_sym = self.local_symbols.items[atom.local_sym_index];
+ const source_shdr = self.sections.items[source_sym.st_shndx];
+
+ log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)});
+
+ for (relocs.items) |*reloc| {
+ const target_sym = self.local_symbols.items[reloc.target];
+ const target_vaddr = target_sym.st_value;
+
+ if (target_vaddr == reloc.prev_vaddr) continue;
+
+ const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
+ const file_offset = source_shdr.sh_offset + section_offset;
+
+ log.debug(" ({x}: [() => 0x{x}] ({s}))", .{
+ reloc.offset,
+ target_vaddr,
+ self.getString(target_sym.st_name),
+ });
+
+ switch (self.ptr_width) {
+ .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@intCast(u32, target_vaddr)), file_offset),
+ .p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset),
+ }
+
+ reloc.prev_vaddr = target_vaddr;
+ }
+ }
+ }
+
// Unfortunately these have to be buffered and done at the end because ELF does not allow
// mixing local and global symbols within a symbol table.
try self.writeAllGlobalSymbols();
@@ -2508,6 +2580,7 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
log.debug("allocating symbol indexes for {s}", .{decl.name});
decl.link.elf.local_sym_index = try self.allocateLocalSymbol();
+ try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.elf.local_sym_index, &decl.link.elf);
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.elf.offset_table_index = i;
@@ -2525,6 +2598,7 @@ fn freeUnnamedConsts(self: *Elf, decl: *Module.Decl) void {
self.freeTextBlock(atom, self.phdr_load_ro_index.?);
self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
self.local_symbols.items[atom.local_sym_index].st_info = 0;
+ _ = self.atom_by_index_table.remove(atom.local_sym_index);
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -2543,11 +2617,11 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
if (decl.link.elf.local_sym_index != 0) {
self.local_symbol_free_list.append(self.base.allocator, decl.link.elf.local_sym_index) catch {};
- self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
-
self.local_symbols.items[decl.link.elf.local_sym_index].st_info = 0;
-
+ _ = self.atom_by_index_table.remove(decl.link.elf.local_sym_index);
decl.link.elf.local_sym_index = 0;
+
+ self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
}
// TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
// is desired for both.
@@ -2993,7 +3067,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
// TODO implement .debug_info for global variables
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ const res = try codegen.generateSymbol(&self.base, decl.link.elf.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@@ -3028,19 +3102,6 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
}
const unnamed_consts = gop.value_ptr;
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
- .none = .{},
- });
- const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, decl, em);
- return error.AnalysisFail;
- },
- };
-
const atom = try self.base.allocator.create(TextBlock);
errdefer self.base.allocator.destroy(atom);
atom.* = TextBlock.empty;
@@ -3056,6 +3117,20 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
log.debug("allocating symbol indexes for {s}", .{name});
atom.local_sym_index = try self.allocateLocalSymbol();
+ try self.atom_by_index_table.putNoClobber(self.base.allocator, atom.local_sym_index, atom);
+
+ const res = try codegen.generateSymbol(&self.base, atom.local_sym_index, decl.srcLoc(), typed_value, &code_buffer, .{
+ .none = .{},
+ });
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return error.AnalysisFail;
+ },
+ };
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const phdr_index = self.phdr_load_ro_index.?;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 065145cdc8..37040f267f 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -3745,19 +3745,6 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
}
const unnamed_consts = gop.value_ptr;
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
- .none = .{},
- });
- const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, decl, em);
- return error.AnalysisFail;
- },
- };
-
const name_str_index = blk: {
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl.name, index });
@@ -3772,12 +3759,27 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
const match = (try self.getMatchingSection(.{
.segname = makeStaticString("__TEXT"),
.sectname = makeStaticString("__const"),
- .size = code.len,
+ .size = @sizeOf(u64),
.@"align" = math.log2(required_alignment),
})).?;
const local_sym_index = try self.allocateLocalSymbol();
- const atom = try self.createEmptyAtom(local_sym_index, code.len, math.log2(required_alignment));
- mem.copy(u8, atom.code.items, code);
+ const atom = try self.createEmptyAtom(local_sym_index, @sizeOf(u64), math.log2(required_alignment));
+
+ const res = try codegen.generateSymbol(&self.base, local_sym_index, decl.srcLoc(), typed_value, &code_buffer, .{
+ .none = .{},
+ });
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return error.AnalysisFail;
+ },
+ };
+
+ atom.code.clearRetainingCapacity();
+ try atom.code.appendSlice(self.base.allocator, code);
const addr = try self.allocateAtom(atom, code.len, required_alignment, match);
log.debug("allocated atom for {s} at 0x{x}", .{ name, addr });
@@ -3841,7 +3843,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const res = if (debug_buffers) |dbg|
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ try codegen.generateSymbol(&self.base, decl.link.elf.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@@ -3852,7 +3854,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
},
})
else
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ try codegen.generateSymbol(&self.base, decl.link.elf.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none);
@@ -4341,16 +4343,17 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
}
}
-pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
+pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
+ assert(self.llvm_object == null);
assert(decl.link.macho.local_sym_index != 0);
- return self.locals.items[decl.link.macho.local_sym_index].n_value;
-}
-pub fn getDeclVAddrWithReloc(self: *MachO, decl: *const Module.Decl, offset: u64) !u64 {
- assert(decl.link.macho.local_sym_index != 0);
- assert(self.active_decl != null);
+ // TODO cache local_sym_index => atom!!!
+ const atom: *Atom = blk: for (self.managed_atoms.items) |atom| {
+ if (atom.local_sym_index == parent_atom_index) {
+ break :blk atom;
+ }
+ } else unreachable;
- const atom = &self.active_decl.?.link.macho;
try atom.relocs.append(self.base.allocator, .{
.offset = @intCast(u32, offset),
.target = .{ .local = decl.link.macho.local_sym_index },
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index c2d6d61066..ee7272ca8d 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -302,7 +302,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ const res = try codegen.generateSymbol(&self.base, @intCast(u32, decl.link.plan9.sym_index.?), decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{ .none = .{} });
@@ -749,7 +749,9 @@ pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
_ = self;
_ = decl;
}
-pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl) u64 {
+pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
+ _ = parent_atom_index;
+ _ = offset;
if (decl.ty.zigTypeTag() == .Fn) {
var start = self.bases.text;
var it_file = self.fn_decl_table.iterator();
From 066758b1a296aa6f01d505f7b90d5aee2f387d30 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 11 Feb 2022 12:16:32 +0100
Subject: [PATCH 0126/2031] macho: correctly lower slices incl reloc and rebase
tracking
Match changes required to `Elf` linker, which enable lowering
of const slices on `MachO` targets.
Expand `Mir` instructions that require knowledge of the containing
atom: pass the symbol index into the linker's table from codegen
via `Mir` to the emitter, which then uses it in the linker.
---
src/arch/aarch64/CodeGen.zig | 16 ++++++++++-
src/arch/aarch64/Emit.zig | 14 +++++-----
src/arch/aarch64/Mir.zig | 8 +++++-
src/arch/x86_64/CodeGen.zig | 52 ++++++++++++++++++++++++++++++++---
src/arch/x86_64/Emit.zig | 19 ++++++++-----
src/arch/x86_64/Mir.zig | 21 ++++++++++----
src/arch/x86_64/PrintMir.zig | 4 +--
src/link/MachO.zig | 53 ++++++++++++++----------------------
src/link/Plan9.zig | 4 ++-
9 files changed, 130 insertions(+), 61 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 42f2c66df1..79fa38e275 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1617,7 +1617,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.addInst(.{
.tag = .call_extern,
- .data = .{ .extern_fn = n_strx },
+ .data = .{
+ .extern_fn = .{
+ .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
+ .sym_name = n_strx,
+ },
+ },
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@@ -2485,9 +2490,18 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
},
.memory => |addr| {
+ const owner_decl = self.mod_fn.owner_decl;
+ // TODO when refactoring LinkBlock, make this into a generic function.
+ const atom_index = switch (self.bin_file.tag) {
+ .macho => owner_decl.link.macho.local_sym_index,
+ .elf => owner_decl.link.elf.local_sym_index,
+ .plan9 => @intCast(u32, owner_decl.link.plan9.sym_index orelse 0),
+ else => return self.fail("TODO handle aarch64 load memory in {}", .{self.bin_file.tag}),
+ };
_ = try self.addInst(.{
.tag = .load_memory,
.data = .{ .payload = try self.addExtra(Mir.LoadMemory{
+ .atom_index = atom_index,
.register = @enumToInt(reg),
.addr = @intCast(u32, addr),
}) },
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 3528bae709..5b2610f508 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -537,7 +537,7 @@ fn mirDebugEpilogueBegin(self: *Emit) !void {
fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
assert(emit.mir.instructions.items(.tag)[inst] == .call_extern);
- const n_strx = emit.mir.instructions.items(.data)[inst].extern_fn;
+ const extern_fn = emit.mir.instructions.items(.data)[inst].extern_fn;
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const offset = blk: {
@@ -547,9 +547,10 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk offset;
};
// Add relocation to the decl.
- try macho_file.active_decl.?.link.macho.relocs.append(emit.bin_file.allocator, .{
+ const atom = macho_file.atom_by_index_table.get(extern_fn.atom_index).?;
+ try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
- .target = .{ .global = n_strx },
+ .target = .{ .global = extern_fn.sym_name },
.addend = 0,
.subtractor = null,
.pcrel = true,
@@ -613,10 +614,9 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
));
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- // TODO I think the reloc might be in the wrong place.
- const decl = macho_file.active_decl.?;
+ const atom = macho_file.atom_by_index_table.get(load_memory.atom_index).?;
// Page reloc for adrp instruction.
- try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
+ try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
.target = .{ .local = addr },
.addend = 0,
@@ -626,7 +626,7 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
.@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_GOT_LOAD_PAGE21),
});
// Pageoff reloc for adrp instruction.
- try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
+ try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset + 4,
.target = .{ .local = addr },
.addend = 0,
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 5b232a08c0..5546b32652 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -134,7 +134,12 @@ pub const Inst = struct {
/// An extern function
///
/// Used by e.g. call_extern
- extern_fn: u32,
+ extern_fn: struct {
+ /// Index of the containing atom.
+ atom_index: u32,
+ /// Index into the linker's string table.
+ sym_name: u32,
+ },
/// A 16-bit immediate value.
///
/// Used by e.g. svc
@@ -278,6 +283,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
}
pub const LoadMemory = struct {
+ atom_index: u32,
register: u32,
addr: u32,
};
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index c0bb233499..f9235512a7 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -1897,7 +1897,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.reg1 = addr_reg.to64(),
.flags = flags,
}).encode(),
- .data = .{ .linker_sym_index = sym_index },
+ .data = .{
+ .load_reloc = .{
+ .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
+ .sym_index = sym_index,
+ },
+ },
});
break :blk addr_reg;
},
@@ -2670,7 +2675,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
- .data = .{ .extern_fn = n_strx },
+ .data = .{
+ .extern_fn = .{
+ .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
+ .sym_name = n_strx,
+ },
+ },
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@@ -3550,7 +3560,12 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
.reg1 = addr_reg.to64(),
.flags = flags,
}).encode(),
- .data = .{ .linker_sym_index = sym_index },
+ .data = .{
+ .load_reloc = .{
+ .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
+ .sym_index = sym_index,
+ },
+ },
});
break :blk addr_reg;
},
@@ -3767,6 +3782,30 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr });
break :blk reg;
},
+ .direct_load,
+ .got_load,
+ => |sym_index| {
+ const flags: u2 = switch (mcv) {
+ .got_load => 0b00,
+ .direct_load => 0b01,
+ else => unreachable,
+ };
+ const addr_reg = try self.register_manager.allocReg(null);
+ _ = try self.addInst(.{
+ .tag = .lea_pie,
+ .ops = (Mir.Ops{
+ .reg1 = addr_reg.to64(),
+ .flags = flags,
+ }).encode(),
+ .data = .{
+ .load_reloc = .{
+ .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
+ .sym_index = sym_index,
+ },
+ },
+ });
+ break :blk addr_reg;
+ },
else => {
return self.fail("TODO implement memcpy for setting stack from {}", .{mcv});
},
@@ -4202,7 +4241,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.reg1 = reg,
.flags = flags,
}).encode(),
- .data = .{ .linker_sym_index = sym_index },
+ .data = .{
+ .load_reloc = .{
+ .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
+ .sym_index = sym_index,
+ },
+ },
});
// MOV reg, [reg]
_ = try self.addInst(.{
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 3f221f0f19..128ea52847 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -763,6 +763,7 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .lea_pie);
const ops = Mir.Ops.decode(emit.mir.instructions.items(.ops)[inst]);
+ const load_reloc = emit.mir.instructions.items(.data)[inst].load_reloc;
// lea reg1, [rip + reloc]
// RM
@@ -772,18 +773,19 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
emit.code,
);
+
const end_offset = emit.code.items.len;
- const sym_index = emit.mir.instructions.items(.data)[inst].linker_sym_index;
+
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const reloc_type = switch (ops.flags) {
0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => return emit.fail("TODO unused LEA PIE variants 0b10 and 0b11", .{}),
};
- const decl = macho_file.active_decl.?;
- try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
+ const atom = macho_file.atom_by_index_table.get(load_reloc.atom_index).?;
+ try atom.relocs.append(emit.bin_file.allocator, .{
.offset = @intCast(u32, end_offset - 4),
- .target = .{ .local = sym_index },
+ .target = .{ .local = load_reloc.sym_index },
.addend = 0,
.subtractor = null,
.pcrel = true,
@@ -801,17 +803,20 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .call_extern);
- const n_strx = emit.mir.instructions.items(.data)[inst].extern_fn;
+ const extern_fn = emit.mir.instructions.items(.data)[inst].extern_fn;
+
const offset = blk: {
// callq
try lowerToDEnc(.call_near, 0, emit.code);
break :blk @intCast(u32, emit.code.items.len) - 4;
};
+
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- try macho_file.active_decl.?.link.macho.relocs.append(emit.bin_file.allocator, .{
+ const atom = macho_file.atom_by_index_table.get(extern_fn.atom_index).?;
+ try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
- .target = .{ .global = n_strx },
+ .target = .{ .global = extern_fn.sym_name },
.addend = 0,
.subtractor = null,
.pcrel = true,
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index aaabcab04d..046cb0e9f6 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -185,7 +185,7 @@ pub const Inst = struct {
/// 0b00 reg1, [rip + reloc] // via GOT emits X86_64_RELOC_GOT relocation
/// 0b01 reg1, [rip + reloc] // direct load emits X86_64_RELOC_SIGNED relocation
/// Notes:
- /// * `Data` contains `linker_sym_index`
+ /// * `Data` contains `load_reloc`
lea_pie,
/// ops flags: form:
@@ -350,10 +350,19 @@ pub const Inst = struct {
/// A 32-bit immediate value.
imm: u32,
/// An extern function.
- /// Index into the linker's string table.
- extern_fn: u32,
- /// Entry in the linker's symbol table.
- linker_sym_index: u32,
+ extern_fn: struct {
+ /// Index of the containing atom.
+ atom_index: u32,
+ /// Index into the linker's string table.
+ sym_name: u32,
+ },
+ /// PIE load relocation.
+ load_reloc: struct {
+ /// Index of the containing atom.
+ atom_index: u32,
+ /// Index into the linker's symbol table.
+ sym_index: u32,
+ },
/// Index into `extra`. Meaning of what can be found there is context-dependent.
payload: u32,
};
@@ -362,7 +371,7 @@ pub const Inst = struct {
// Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
comptime {
if (builtin.mode != .Debug) {
- assert(@sizeOf(Inst) == 8);
+ assert(@sizeOf(Data) == 8);
}
}
};
diff --git a/src/arch/x86_64/PrintMir.zig b/src/arch/x86_64/PrintMir.zig
index 7c96b15210..4ab32878be 100644
--- a/src/arch/x86_64/PrintMir.zig
+++ b/src/arch/x86_64/PrintMir.zig
@@ -450,6 +450,7 @@ fn mirLea(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
fn mirLeaPie(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
+ const load_reloc = print.mir.instructions.items(.data)[inst].load_reloc;
try w.print("lea {s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
@@ -459,9 +460,8 @@ fn mirLeaPie(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
else => unreachable,
}
try w.print("[rip + 0x0] ", .{});
- const sym_index = print.mir.instructions.items(.data)[inst].linker_sym_index;
if (print.bin_file.cast(link.File.MachO)) |macho_file| {
- const target = macho_file.locals.items[sym_index];
+ const target = macho_file.locals.items[load_reloc.sym_index];
const target_name = macho_file.getString(target.n_strx);
try w.print("target@{s}", .{target_name});
} else {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 37040f267f..4aa627ca39 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -40,6 +40,7 @@ const StringIndexContext = std.hash_map.StringIndexContext;
const Trie = @import("MachO/Trie.zig");
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
+const Value = @import("../value.zig").Value;
pub const TextBlock = Atom;
@@ -220,6 +221,7 @@ atoms: std.AutoHashMapUnmanaged(MatchingSection, *Atom) = .{},
/// at present owned by Module.Decl.
/// TODO consolidate this.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -248,12 +250,6 @@ unnamed_const_atoms: UnnamedConstTable = .{},
/// TODO consolidate this.
decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, ?MatchingSection) = .{},
-/// Currently active Module.Decl.
-/// TODO this might not be necessary if we figure out how to pass Module.Decl instance
-/// to codegen.genSetReg() or alternatively move PIE displacement for MCValue{ .memory = x }
-/// somewhere else in the codegen.
-active_decl: ?*Module.Decl = null,
-
const Entry = struct {
target: Atom.Relocation.Target,
atom: *Atom,
@@ -3441,6 +3437,8 @@ pub fn deinit(self: *MachO) void {
}
self.unnamed_const_atoms.deinit(self.base.allocator);
}
+
+ self.atom_by_index_table.deinit(self.base.allocator);
}
pub fn closeFiles(self: MachO) void {
@@ -3647,6 +3645,7 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
if (decl.link.macho.local_sym_index != 0) return;
decl.link.macho.local_sym_index = try self.allocateLocalSymbol();
+ try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.macho.local_sym_index, &decl.link.macho);
try self.decls.putNoClobber(self.base.allocator, decl, null);
const got_target = .{ .local = decl.link.macho.local_sym_index };
@@ -3693,8 +3692,6 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
}
}
- self.active_decl = decl;
-
const res = if (debug_buffers) |dbg|
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
.dwarf = .{
@@ -3756,14 +3753,9 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
log.debug("allocating symbol indexes for {s}", .{name});
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
- const match = (try self.getMatchingSection(.{
- .segname = makeStaticString("__TEXT"),
- .sectname = makeStaticString("__const"),
- .size = @sizeOf(u64),
- .@"align" = math.log2(required_alignment),
- })).?;
const local_sym_index = try self.allocateLocalSymbol();
const atom = try self.createEmptyAtom(local_sym_index, @sizeOf(u64), math.log2(required_alignment));
+ try self.atom_by_index_table.putNoClobber(self.base.allocator, local_sym_index, atom);
const res = try codegen.generateSymbol(&self.base, local_sym_index, decl.srcLoc(), typed_value, &code_buffer, .{
.none = .{},
@@ -3780,6 +3772,8 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
atom.code.clearRetainingCapacity();
try atom.code.appendSlice(self.base.allocator, code);
+
+ const match = try self.getMatchingSectionAtom(atom, typed_value.ty, typed_value.val);
const addr = try self.allocateAtom(atom, code.len, required_alignment, match);
log.debug("allocated atom for {s} at 0x{x}", .{ name, addr });
@@ -3839,11 +3833,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
}
}
- self.active_decl = decl;
-
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const res = if (debug_buffers) |dbg|
- try codegen.generateSymbol(&self.base, decl.link.elf.local_sym_index, decl.srcLoc(), .{
+ try codegen.generateSymbol(&self.base, decl.link.macho.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@@ -3854,7 +3846,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
},
})
else
- try codegen.generateSymbol(&self.base, decl.link.elf.local_sym_index, decl.srcLoc(), .{
+ try codegen.generateSymbol(&self.base, decl.link.macho.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none);
@@ -3908,13 +3900,11 @@ fn isElemTyPointer(ty: Type) bool {
}
}
-fn getMatchingSectionDecl(self: *MachO, decl: *Module.Decl) !MatchingSection {
- const code = decl.link.macho.code.items;
- const alignment = decl.ty.abiAlignment(self.base.options.target);
+fn getMatchingSectionAtom(self: *MachO, atom: *Atom, ty: Type, val: Value) !MatchingSection {
+ const code = atom.code.items;
+ const alignment = ty.abiAlignment(self.base.options.target);
const align_log_2 = math.log2(alignment);
- const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
- const val = decl.val;
const mode = self.base.options.optimize_mode;
const match: MatchingSection = blk: {
// TODO finish and audit this function
@@ -4023,9 +4013,11 @@ fn getMatchingSectionDecl(self: *MachO, decl: *Module.Decl) !MatchingSection {
},
}
};
+ const local = self.locals.items[atom.local_sym_index];
const seg = self.load_commands.items[match.seg].segment;
const sect = seg.sections.items[match.sect];
- log.debug(" allocating atom in '{s},{s}' ({d},{d})", .{
+ log.debug(" allocating atom '{s}' in '{s},{s}' ({d},{d})", .{
+ self.getString(local.n_strx),
sect.segName(),
sect.sectName(),
match.seg,
@@ -4041,7 +4033,7 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
const decl_ptr = self.decls.getPtr(decl).?;
if (decl_ptr.* == null) {
- decl_ptr.* = try self.getMatchingSectionDecl(decl);
+ decl_ptr.* = try self.getMatchingSectionAtom(&decl.link.macho, decl.ty, decl.val);
}
const match = decl_ptr.*.?;
@@ -4290,6 +4282,8 @@ fn freeUnnamedConsts(self: *MachO, decl: *Module.Decl) void {
}, true);
self.locals_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
self.locals.items[atom.local_sym_index].n_type = 0;
+ _ = self.atom_by_index_table.remove(atom.local_sym_index);
+ atom.local_sym_index = 0;
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -4316,6 +4310,7 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
}
self.locals.items[decl.link.macho.local_sym_index].n_type = 0;
+ _ = self.atom_by_index_table.remove(decl.link.macho.local_sym_index);
decl.link.macho.local_sym_index = 0;
}
if (self.d_sym) |*ds| {
@@ -4347,13 +4342,7 @@ pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl, parent_atom_index: u
assert(self.llvm_object == null);
assert(decl.link.macho.local_sym_index != 0);
- // TODO cache local_sym_index => atom!!!
- const atom: *Atom = blk: for (self.managed_atoms.items) |atom| {
- if (atom.local_sym_index == parent_atom_index) {
- break :blk atom;
- }
- } else unreachable;
-
+ const atom = self.atom_by_index_table.get(parent_atom_index).?;
try atom.relocs.append(self.base.allocator, .{
.offset = @intCast(u32, offset),
.target = .{ .local = decl.link.macho.local_sym_index },
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index ee7272ca8d..4eaa6ed26b 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -302,7 +302,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- const res = try codegen.generateSymbol(&self.base, @intCast(u32, decl.link.plan9.sym_index.?), decl.srcLoc(), .{
+ // TODO we need the symbol index for the symbol in the table of locals for the containing atom
+ const sym_index = decl.link.plan9.sym_index orelse 0;
+ const res = try codegen.generateSymbol(&self.base, @intCast(u32, sym_index), decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{ .none = .{} });
From cad3e3e63a238902cdd80eb2504c879d6637a4d5 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 11 Feb 2022 12:35:31 +0100
Subject: [PATCH 0127/2031] x64: enable more behavior tests
---
test/behavior.zig | 2 +-
test/behavior/basic.zig | 9 -------
test/behavior/slice.zig | 58 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 59 insertions(+), 10 deletions(-)
diff --git a/test/behavior.zig b/test/behavior.zig
index a1d8e9bef9..86e48f1797 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -38,6 +38,7 @@ test {
_ = @import("behavior/optional.zig");
_ = @import("behavior/prefetch.zig");
_ = @import("behavior/pub_enum.zig");
+ _ = @import("behavior/slice.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
_ = @import("behavior/type.zig");
_ = @import("behavior/truncate.zig");
@@ -76,7 +77,6 @@ test {
_ = @import("behavior/pointers.zig");
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
- _ = @import("behavior/slice.zig");
_ = @import("behavior/src.zig");
_ = @import("behavior/this.zig");
_ = @import("behavior/try.zig");
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index d87d04e246..5d7bb3b9a7 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -120,14 +120,12 @@ test "return string from function" {
test "hex escape" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello"));
}
test "multiline string" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -140,7 +138,6 @@ test "multiline string" {
test "multiline string comments at start" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
//\\one
@@ -153,7 +150,6 @@ test "multiline string comments at start" {
test "multiline string comments at end" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -166,7 +162,6 @@ test "multiline string comments at end" {
test "multiline string comments in middle" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -179,7 +174,6 @@ test "multiline string comments in middle" {
test "multiline string comments at multiple places" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -193,14 +187,11 @@ test "multiline string comments at multiple places" {
}
test "string concatenation" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try expect(mem.eql(u8, "OK" ++ " IT " ++ "WORKED", "OK IT WORKED"));
}
test "array mult operator" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
}
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 64bd972ead..327b8f4f76 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -27,7 +27,10 @@ comptime {
}
test "slicing" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
var array: [20]i32 = undefined;
array[5] = 1234;
@@ -45,6 +48,8 @@ test "slicing" {
test "const slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
comptime {
const a = "1234567890";
try expect(a.len == 10);
@@ -56,6 +61,8 @@ test "const slice" {
test "comptime slice of undefined pointer of length 0" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
const slice1 = @as([*]i32, undefined)[0..0];
try expect(slice1.len == 0);
const slice2 = @as([*]i32, undefined)[100..100];
@@ -64,6 +71,8 @@ test "comptime slice of undefined pointer of length 0" {
test "implicitly cast array of size 0 to slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
var msg = [_]u8{};
try assertLenIsZero(&msg);
}
@@ -74,6 +83,8 @@ fn assertLenIsZero(msg: []const u8) !void {
test "access len index of sentinel-terminated slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
const S = struct {
fn doTheTest() !void {
var slice: [:0]const u8 = "hello";
@@ -88,6 +99,8 @@ test "access len index of sentinel-terminated slice" {
test "comptime slice of slice preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
comptime {
var buff: [10]u8 = undefined;
buff[0..][0..][0] = 1;
@@ -97,6 +110,8 @@ test "comptime slice of slice preserves comptime var" {
test "slice of type" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
comptime {
var types_array = [_]type{ i32, f64, type };
for (types_array) |T, i| {
@@ -120,6 +135,9 @@ test "slice of type" {
test "generic malloc free" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const a = memAlloc(u8, 10) catch unreachable;
memFree(u8, a);
}
@@ -133,6 +151,8 @@ fn memFree(comptime T: type, memory: []T) void {
test "slice of hardcoded address to pointer" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
const S = struct {
fn doTheTest() !void {
const pointer = @intToPtr([*]u8, 0x04)[0..2];
@@ -148,6 +168,8 @@ test "slice of hardcoded address to pointer" {
test "comptime slice of pointer preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
comptime {
var buff: [10]u8 = undefined;
var a = @ptrCast([*]u8, &buff);
@@ -158,6 +180,8 @@ test "comptime slice of pointer preserves comptime var" {
test "comptime pointer cast array and then slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
const ptrA: [*]const u8 = @ptrCast([*]const u8, &array);
@@ -172,6 +196,9 @@ test "comptime pointer cast array and then slice" {
test "slicing zero length array" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const s1 = ""[0..];
const s2 = ([_]u32{})[0..];
try expect(s1.len == 0);
@@ -185,6 +212,8 @@ const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(@ptrToInt(x) == 0x1000);
try expect(x.len == 0x500);
@@ -194,6 +223,9 @@ test "compile time slice of pointer to hard coded address" {
}
test "slice string literal has correct type" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
comptime {
try expect(@TypeOf("aoeu"[0..]) == *const [4:0]u8);
const array = [_]i32{ 1, 2, 3, 4 };
@@ -207,6 +239,7 @@ test "slice string literal has correct type" {
test "result location zero sized array inside struct field implicit cast to slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const E = struct {
entries: []u32,
@@ -216,6 +249,9 @@ test "result location zero sized array inside struct field implicit cast to slic
}
test "runtime safety lets us slice from len..len" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
var an_array = [_]u8{ 1, 2, 3 };
try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
}
@@ -225,6 +261,9 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "C pointer" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
var len: u32 = 10;
var slice = buf[0..len];
@@ -232,6 +271,9 @@ test "C pointer" {
}
test "C pointer slice access" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
var buf: [10]u32 = [1]u32{42} ** 10;
const c_ptr = @ptrCast([*c]const u32, &buf);
@@ -245,6 +287,8 @@ test "C pointer slice access" {
}
test "comptime slices are disambiguated" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
try expect(sliceSum(&[_]u8{ 1, 2 }) == 3);
try expect(sliceSum(&[_]u8{ 3, 4 }) == 7);
}
@@ -258,6 +302,9 @@ fn sliceSum(comptime q: []const u8) i32 {
}
test "slice type with custom alignment" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const LazilyResolvedType = struct {
anything: i32,
};
@@ -269,6 +316,8 @@ test "slice type with custom alignment" {
}
test "obtaining a null terminated slice" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// here we have a normal array
@@ -294,6 +343,7 @@ test "obtaining a null terminated slice" {
test "empty array to slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -312,6 +362,9 @@ test "empty array to slice" {
}
test "@ptrCast slice to pointer" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const S = struct {
fn doTheTest() !void {
var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -327,6 +380,7 @@ test "@ptrCast slice to pointer" {
test "slice syntax resulting in pointer-to-array" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -475,6 +529,7 @@ test "slice syntax resulting in pointer-to-array" {
test "type coercion of pointer to anon struct literal to pointer to slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
const U = union {
@@ -508,6 +563,7 @@ test "type coercion of pointer to anon struct literal to pointer to slice" {
test "array concat of slices gives slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var a: []const u8 = "aoeu";
@@ -519,6 +575,7 @@ test "array concat of slices gives slice" {
test "slice bounds in comptime concatenation" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const bs = comptime blk: {
const b = "........1........";
@@ -535,6 +592,7 @@ test "slice bounds in comptime concatenation" {
test "slice sentinel access at comptime" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
{
const str0 = &[_:0]u8{ '1', '2', '3' };
From bd8d6a8342914974ca163fa75db0562d181f6d27 Mon Sep 17 00:00:00 2001
From: m
Date: Fri, 11 Feb 2022 15:28:36 +0100
Subject: [PATCH 0128/2031] std: validate frame-pointer address in stack
walking
---
lib/std/c.zig | 1 +
lib/std/c/linux.zig | 1 +
lib/std/debug.zig | 24 +++++++++++++++++++++++-
lib/std/os.zig | 14 ++++++++++++++
lib/std/os/linux.zig | 10 ++++++++++
5 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 6d904c8b98..396a6f6702 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -123,6 +123,7 @@ pub extern "c" fn write(fd: c.fd_t, buf: [*]const u8, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: c.fd_t, buf: [*]const u8, nbyte: usize, offset: c.off_t) isize;
pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: c.fd_t, offset: c.off_t) *anyopaque;
pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
+pub extern "c" fn msync(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: c_int) c_int;
pub extern "c" fn linkat(oldfd: c.fd_t, oldpath: [*:0]const u8, newfd: c.fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig
index db55b5d850..be136bcbea 100644
--- a/lib/std/c/linux.zig
+++ b/lib/std/c/linux.zig
@@ -30,6 +30,7 @@ pub const MAP = struct {
/// Only used by libc to communicate failure.
pub const FAILED = @intToPtr(*anyopaque, maxInt(usize));
};
+pub const MSF = linux.MSF;
pub const MMAP2_UNIT = linux.MMAP2_UNIT;
pub const MSG = linux.MSG;
pub const NAME_MAX = linux.NAME_MAX;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 69a68faad6..be47985041 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -424,6 +424,28 @@ pub const StackIterator = struct {
return address;
}
+ fn isValidMemory(address: u64) bool {
+ if (native_os != .windows) {
+ var res = true;
+ const length = 2 * mem.page_size;
+ const aligned_address = address & ~@intCast(u64, (mem.page_size - 1));
+ const aligned_memory = @intToPtr([*]align(mem.page_size) u8, aligned_address)[0..length];
+
+ os.msync(aligned_memory, os.MSF.ASYNC) catch |err| {
+ switch (err) {
+ os.MSyncError.UnmappedMemory => {
+ res = false;
+ },
+ else => unreachable,
+ }
+ };
+ return res;
+ } else {
+ // TODO: Using windows memory API check if a page is mapped
+ return true;
+ }
+ }
+
fn next_internal(self: *StackIterator) ?usize {
const fp = if (comptime native_arch.isSPARC())
// On SPARC the offset is positive. (!)
@@ -432,7 +454,7 @@ pub const StackIterator = struct {
math.sub(usize, self.fp, fp_offset) catch return null;
// Sanity check.
- if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)))
+ if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)) or !isValidMemory(fp))
return null;
const new_fp = math.add(usize, @intToPtr(*const usize, fp).*, fp_bias) catch return null;
diff --git a/lib/std/os.zig b/lib/std/os.zig
index a935cfd8c0..16a32766dc 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -88,6 +88,7 @@ pub const Kevent = system.Kevent;
pub const LOCK = system.LOCK;
pub const MADV = system.MADV;
pub const MAP = system.MAP;
+pub const MSF = system.MSF;
pub const MAX_ADDR_LEN = system.MAX_ADDR_LEN;
pub const MMAP2_UNIT = system.MMAP2_UNIT;
pub const MSG = system.MSG;
@@ -4016,6 +4017,19 @@ pub fn munmap(memory: []align(mem.page_size) const u8) void {
}
}
+pub const MSyncError = error{
+ UnmappedMemory,
+} || UnexpectedError;
+
+pub fn msync(memory: []align(mem.page_size) u8, flags: i32) MSyncError!void {
+ switch (errno(system.msync(memory.ptr, memory.len, flags))) {
+ .SUCCESS => return,
+ .NOMEM => return error.UnmappedMemory, // Unsuccessful: the provided pointer does not point to mapped memory
+ .INVAL => unreachable, // Invalid parameters.
+ else => unreachable,
+ }
+}
+
pub const AccessError = error{
PermissionDenied,
FileNotFound,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index c1591f7ea1..608db08a9c 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -406,6 +406,16 @@ pub fn mprotect(address: [*]const u8, length: usize, protection: usize) usize {
return syscall3(.mprotect, @ptrToInt(address), length, protection);
}
+pub const MSF = struct {
+ pub const ASYNC = 1;
+ pub const INVALIDATE = 2;
+ pub const SYNC = 4;
+};
+
+pub fn msync(address: [*]const u8, length: usize, flags: u32) usize {
+ return syscall3(.msync, @ptrToInt(address), length, flags);
+}
+
pub fn munmap(address: [*]const u8, length: usize) usize {
return syscall2(.munmap, @ptrToInt(address), length);
}
From 70d7f87be00aa1a372c856759948fd62666be295 Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Thu, 10 Feb 2022 13:29:48 -0700
Subject: [PATCH 0129/2031] Fix up sign handling and add arbitrary-length
integer support to @bitCast()
---
lib/std/math/big/int.zig | 174 +++++++++++++++++++++++++++-------
lib/std/math/big/int_test.zig | 25 +++++
src/value.zig | 5 +-
test/behavior/bitcast.zig | 107 ++++++++++++++++++++-
4 files changed, 270 insertions(+), 41 deletions(-)
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 87a62bf66c..1c6404fb3c 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -1,4 +1,5 @@
const std = @import("../../std.zig");
+const builtin = @import("builtin");
const math = std.math;
const Limb = std.math.big.Limb;
const limb_bits = @typeInfo(Limb).Int.bits;
@@ -14,6 +15,7 @@ const minInt = std.math.minInt;
const assert = std.debug.assert;
const Endian = std.builtin.Endian;
const Signedness = std.builtin.Signedness;
+const native_endian = builtin.cpu.arch.endian();
const debug_safety = false;
@@ -1621,6 +1623,15 @@ pub const Mutable = struct {
}
}
+ /// Read the value of `x` from `buffer`
+ /// Asserts that `buffer` and `bit_count` are large enough to store the value.
+ ///
+ /// For integers with a well-defined layout (e.g. all power-of-two integers), this function
+ /// reads from `buffer` as if it were the contents of @ptrCast([]const u8, &x), where the
+ /// slice length is taken to be @sizeOf(std.meta.Int(signedness, ))
+ ///
+ /// For integers with a non-well-defined layout, `buffer` must have been created by
+ /// writeTwosComplement.
pub fn readTwosComplement(
x: *Mutable,
buffer: []const u8,
@@ -1634,26 +1645,77 @@ pub const Mutable = struct {
x.positive = true;
return;
}
- // zig fmt: off
- switch (signedness) {
- .signed => {
- if (bit_count <= 8) return x.set(mem.readInt( i8, buffer[0.. 1], endian));
- if (bit_count <= 16) return x.set(mem.readInt( i16, buffer[0.. 2], endian));
- if (bit_count <= 32) return x.set(mem.readInt( i32, buffer[0.. 4], endian));
- if (bit_count <= 64) return x.set(mem.readInt( i64, buffer[0.. 8], endian));
- if (bit_count <= 128) return x.set(mem.readInt(i128, buffer[0..16], endian));
- },
- .unsigned => {
- if (bit_count <= 8) return x.set(mem.readInt( u8, buffer[0.. 1], endian));
- if (bit_count <= 16) return x.set(mem.readInt( u16, buffer[0.. 2], endian));
- if (bit_count <= 32) return x.set(mem.readInt( u32, buffer[0.. 4], endian));
- if (bit_count <= 64) return x.set(mem.readInt( u64, buffer[0.. 8], endian));
- if (bit_count <= 128) return x.set(mem.readInt(u128, buffer[0..16], endian));
- },
- }
- // zig fmt: on
- @panic("TODO implement std lib big int readTwosComplement");
+ // byte_count is the total amount of bytes to read from buffer
+ var byte_count = @sizeOf(Limb) * (bit_count / @bitSizeOf(Limb));
+ if (bit_count % @bitSizeOf(Limb) != 0) { // Round up to a power-of-two integer <= Limb
+ byte_count += (std.math.ceilPowerOfTwoAssert(usize, bit_count % @bitSizeOf(Limb)) + 7) / 8;
+ }
+
+ const limb_count = calcTwosCompLimbCount(8 * byte_count);
+
+ // Check whether the input is negative
+ var positive = true;
+ if (signedness == .signed) {
+ var last_byte = switch (endian) {
+ .Little => ((bit_count + 7) / 8) - 1,
+ .Big => byte_count - ((bit_count + 7) / 8),
+ };
+
+ const sign_bit = @as(u8, 1) << @intCast(u3, (bit_count - 1) % 8);
+ positive = ((buffer[last_byte] & sign_bit) == 0);
+ }
+
+ // Copy all complete limbs
+ var carry: u1 = if (positive) 0 else 1;
+ var limb_index: usize = 0;
+ while (limb_index < bit_count / @bitSizeOf(Limb)) : (limb_index += 1) {
+ var buf_index = switch (endian) {
+ .Little => @sizeOf(Limb) * limb_index,
+ .Big => byte_count - (limb_index + 1) * @sizeOf(Limb),
+ };
+
+ const limb_buf = @ptrCast(*const [@sizeOf(Limb)]u8, buffer[buf_index..]);
+ var limb = mem.readInt(Limb, limb_buf, endian);
+
+ // 2's complement (bitwise not, then add carry bit)
+ if (!positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb));
+ x.limbs[limb_index] = limb;
+ }
+
+ // Copy any remaining bytes, using the nearest power-of-two integer that is large enough
+ const bits_left = @intCast(Log2Limb, bit_count % @bitSizeOf(Limb));
+ if (bits_left != 0) {
+ const bytes_read = limb_index * @sizeOf(Limb);
+ const bytes_left = byte_count - bytes_read;
+ var buffer_left = switch (endian) {
+ .Little => buffer[bytes_read..],
+ .Big => buffer[0..],
+ };
+
+ var limb = @intCast(Limb, blk: {
+ // zig fmt: off
+ if (bytes_left == 1) break :blk mem.readInt( u8, buffer_left[0.. 1], endian);
+ if (bytes_left == 2) break :blk mem.readInt( u16, buffer_left[0.. 2], endian);
+ if (bytes_left == 4) break :blk mem.readInt( u32, buffer_left[0.. 4], endian);
+ if (bytes_left == 8) break :blk mem.readInt( u64, buffer_left[0.. 8], endian);
+ if (bytes_left == 16) break :blk mem.readInt(u128, buffer_left[0..16], endian);
+ // zig fmt: on
+ unreachable;
+ });
+
+ // 2's complement (bitwise not, then add carry bit)
+ if (!positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb);
+
+ // Mask off any unused bits
+ const mask = (@as(Limb, 1) << bits_left) -% 1; // 0b0..01..1 with (bits_left) trailing ones
+ limb &= mask;
+
+ x.limbs[limb_count - 1] = limb;
+ }
+ x.positive = positive;
+ x.len = limb_count;
+ x.normalize(x.len);
}
/// Normalize a possible sequence of leading zeros.
@@ -1806,7 +1868,7 @@ pub const Const = struct {
.Int => |info| {
const UT = std.meta.Int(.unsigned, info.bits);
- if (self.bitCountTwosComp() > info.bits) {
+ if (!self.fitsInTwosComp(info.signedness, info.bits)) {
return error.TargetTooSmall;
}
@@ -2013,27 +2075,69 @@ pub const Const = struct {
return s.len;
}
+ /// Write the value of `x` into `buffer`
/// Asserts that `buffer` and `bit_count` are large enough to store the value.
+ ///
+ /// For integers with a well-defined layout (e.g. all power-of-two integers), this function
+ /// can be thought of as writing to `buffer` the contents of @ptrCast([]const u8, &x),
+ /// where the slice length is taken to be @sizeOf(std.meta.Int(_, bit_count))
+ ///
+ /// For integers with a non-well-defined layout, the only requirement is that readTwosComplement
+ /// on the same buffer creates an equivalent big integer.
pub fn writeTwosComplement(x: Const, buffer: []u8, bit_count: usize, endian: Endian) void {
if (bit_count == 0) return;
- // zig fmt: off
- if (x.positive) {
- if (bit_count <= 8) return mem.writeInt( u8, buffer[0.. 1], x.to( u8) catch unreachable, endian);
- if (bit_count <= 16) return mem.writeInt( u16, buffer[0.. 2], x.to( u16) catch unreachable, endian);
- if (bit_count <= 32) return mem.writeInt( u32, buffer[0.. 4], x.to( u32) catch unreachable, endian);
- if (bit_count <= 64) return mem.writeInt( u64, buffer[0.. 8], x.to( u64) catch unreachable, endian);
- if (bit_count <= 128) return mem.writeInt(u128, buffer[0..16], x.to(u128) catch unreachable, endian);
- } else {
- if (bit_count <= 8) return mem.writeInt( i8, buffer[0.. 1], x.to( i8) catch unreachable, endian);
- if (bit_count <= 16) return mem.writeInt( i16, buffer[0.. 2], x.to( i16) catch unreachable, endian);
- if (bit_count <= 32) return mem.writeInt( i32, buffer[0.. 4], x.to( i32) catch unreachable, endian);
- if (bit_count <= 64) return mem.writeInt( i64, buffer[0.. 8], x.to( i64) catch unreachable, endian);
- if (bit_count <= 128) return mem.writeInt(i128, buffer[0..16], x.to(i128) catch unreachable, endian);
+ var byte_count = @sizeOf(Limb) * (bit_count / @bitSizeOf(Limb));
+ if (bit_count % @bitSizeOf(Limb) != 0) {
+ byte_count += (std.math.ceilPowerOfTwoAssert(usize, bit_count % @bitSizeOf(Limb)) + 7) / 8;
}
- // zig fmt: on
+ assert(buffer.len >= byte_count);
+ assert(x.fitsInTwosComp(if (x.positive) .unsigned else .signed, bit_count));
- @panic("TODO implement std lib big int writeTwosComplement for larger than 128 bits");
+ // Copy all complete limbs
+ var carry: u1 = if (x.positive) 0 else 1;
+ var limb_index: usize = 0;
+ while (limb_index < byte_count / @sizeOf(Limb)) : (limb_index += 1) {
+ var buf_index = switch (endian) {
+ .Little => @sizeOf(Limb) * limb_index,
+ .Big => byte_count - (limb_index + 1) * @sizeOf(Limb),
+ };
+
+ var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
+ // 2's complement (bitwise not, then add carry bit)
+ if (!x.positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb));
+
+ var limb_buf = @ptrCast(*[@sizeOf(Limb)]u8, buffer[buf_index..]);
+ mem.writeInt(Limb, limb_buf, limb, endian);
+ }
+
+ // Copy any remaining bytes
+ if (byte_count % @sizeOf(Limb) != 0) {
+ const bytes_read = limb_index * @sizeOf(Limb);
+ const bytes_left = byte_count - bytes_read;
+ var buffer_left = switch (endian) {
+ .Little => buffer[bytes_read..],
+ .Big => buffer[0..],
+ };
+
+ var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
+ // 2's complement (bitwise not, then add carry bit)
+ if (!x.positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb);
+
+ if (bytes_left == 1) {
+ mem.writeInt(u8, buffer_left[0..1], @truncate(u8, limb), endian);
+ } else if (@sizeOf(Limb) > 1 and bytes_left == 2) {
+ mem.writeInt(u16, buffer_left[0..2], @truncate(u16, limb), endian);
+ } else if (@sizeOf(Limb) > 2 and bytes_left == 4) {
+ mem.writeInt(u32, buffer_left[0..4], @truncate(u32, limb), endian);
+ } else if (@sizeOf(Limb) > 4 and bytes_left == 8) {
+ mem.writeInt(u64, buffer_left[0..8], @truncate(u64, limb), endian);
+ } else if (@sizeOf(Limb) > 8 and bytes_left == 16) {
+ mem.writeInt(u128, buffer_left[0..16], @truncate(u128, limb), endian);
+ } else if (@sizeOf(Limb) > 16) {
+ @compileError("@sizeOf(Limb) exceeded supported range");
+ } else unreachable;
+ }
}
/// Returns `math.Order.lt`, `math.Order.eq`, `math.Order.gt` if
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 70a9b97a38..f6f210f56c 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -2486,3 +2486,28 @@ test "big int popcount" {
try testing.expect(a.toConst().orderAgainstScalar(16) == .eq);
}
+
+test "big int conversion read/write twos complement" {
+ var a = try Managed.initSet(testing.allocator, (1 << 493) - 1);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, (1 << 493) - 1);
+ defer b.deinit();
+ var m = b.toMutable();
+
+ var buffer1 = try testing.allocator.alloc(u8, 64);
+ defer testing.allocator.free(buffer1);
+
+ const endians = [_]std.builtin.Endian{ .Little, .Big };
+
+ for (endians) |endian| {
+ // Writing to buffer and back should not change anything
+ a.toConst().writeTwosComplement(buffer1, 493, endian);
+ m.readTwosComplement(buffer1, 493, endian, .unsigned);
+ try testing.expect(m.toConst().order(a.toConst()) == .eq);
+
+ // Equivalent to @bitCast(i493, @as(u493, maxInt(u493)))
+ a.toConst().writeTwosComplement(buffer1, 493, endian);
+ m.readTwosComplement(buffer1, 493, endian, .signed);
+ try testing.expect(m.toConst().orderAgainstScalar(-1) == .eq);
+ }
+}
diff --git a/src/value.zig b/src/value.zig
index 33a75e08bb..2018eb3df3 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1093,8 +1093,9 @@ pub const Value = extern union {
.Int => {
const int_info = ty.intInfo(target);
const endian = target.cpu.arch.endian();
- // TODO use a correct amount of limbs
- const limbs_buffer = try arena.alloc(std.math.big.Limb, 2);
+ const Limb = std.math.big.Limb;
+ const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+ const limbs_buffer = try arena.alloc(Limb, limb_count);
var bigint = BigIntMutable.init(limbs_buffer, 0);
bigint.readTwosComplement(buffer, int_info.bits, endian, int_info.signedness);
return fromBigInt(arena, bigint.toConst());
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index d56e3c1c53..43d6524a4e 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;
+const minInt = std.math.minInt;
const native_endian = builtin.target.cpu.arch.endian();
test "@bitCast i32 -> u32" {
@@ -11,21 +12,119 @@ test "@bitCast i32 -> u32" {
}
fn testBitCast_i32_u32() !void {
- try expect(conv(-1) == maxInt(u32));
- try expect(conv2(maxInt(u32)) == -1);
+ try expect(conv_i32(-1) == maxInt(u32));
+ try expect(conv_u32(maxInt(u32)) == -1);
+ try expect(conv_u32(0x8000_0000) == minInt(i32));
+ try expect(conv_i32(minInt(i32)) == 0x8000_0000);
}
-fn conv(x: i32) u32 {
+fn conv_i32(x: i32) u32 {
return @bitCast(u32, x);
}
-fn conv2(x: u32) i32 {
+fn conv_u32(x: u32) i32 {
return @bitCast(i32, x);
}
+test "@bitCast i48 -> u48" {
+ try testBitCast_i48_u48();
+ comptime try testBitCast_i48_u48();
+}
+
+fn testBitCast_i48_u48() !void {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ try expect(conv_i48(-1) == maxInt(u48));
+ try expect(conv_u48(maxInt(u48)) == -1);
+ try expect(conv_u48(0x8000_0000_0000) == minInt(i48));
+ try expect(conv_i48(minInt(i48)) == 0x8000_0000_0000);
+}
+
+fn conv_i48(x: i48) u48 {
+ return @bitCast(u48, x);
+}
+
+fn conv_u48(x: u48) i48 {
+ return @bitCast(i48, x);
+}
+
+test "@bitCast i27 -> u27" {
+ try testBitCast_i27_u27();
+ comptime try testBitCast_i27_u27();
+}
+
+fn testBitCast_i27_u27() !void {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ try expect(conv_i27(-1) == maxInt(u27));
+ try expect(conv_u27(maxInt(u27)) == -1);
+ try expect(conv_u27(0x400_0000) == minInt(i27));
+ try expect(conv_i27(minInt(i27)) == 0x400_0000);
+}
+
+fn conv_i27(x: i27) u27 {
+ return @bitCast(u27, x);
+}
+
+fn conv_u27(x: u27) i27 {
+ return @bitCast(i27, x);
+}
+
+test "@bitCast i512 -> u512" {
+ try testBitCast_i512_u512();
+ comptime try testBitCast_i512_u512();
+}
+
+fn testBitCast_i512_u512() !void {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ try expect(conv_i512(-1) == maxInt(u512));
+ try expect(conv_u512(maxInt(u512)) == -1);
+ try expect(conv_u512(@as(u512, 1) << 511) == minInt(i512));
+ try expect(conv_i512(minInt(i512)) == (@as(u512, 1) << 511));
+}
+
+fn conv_i512(x: i512) u512 {
+ return @bitCast(u512, x);
+}
+
+fn conv_u512(x: u512) i512 {
+ return @bitCast(i512, x);
+}
+
test "bitcast result to _" {
_ = @bitCast(u8, @as(i8, 1));
}
+test "@bitCast i493 -> u493" {
+ try testBitCast_i493_u493();
+ comptime try testBitCast_i493_u493();
+}
+
+fn testBitCast_i493_u493() !void {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ try expect(conv_i493(-1) == maxInt(u493));
+ try expect(conv_u493(maxInt(u493)) == -1);
+ try expect(conv_u493(@as(u493, 1) << 492) == minInt(i493));
+ try expect(conv_i493(minInt(i493)) == (@as(u493, 1) << 492));
+}
+
+fn conv_i493(x: i493) u493 {
+ return @bitCast(u493, x);
+}
+
+fn conv_u493(x: u493) i493 {
+ return @bitCast(i493, x);
+}
+
test "nested bitcast" {
const S = struct {
fn moo(x: isize) !void {
From eeb043f5833db03ac3250f9943ba8be0b518432f Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Fri, 11 Feb 2022 08:46:01 -0700
Subject: [PATCH 0130/2031] Fix big-endian handling in stage1
bigint_write_twos_complement
---
src/stage1/bigint.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/stage1/bigint.cpp b/src/stage1/bigint.cpp
index eab0f037cf..e10af08bc4 100644
--- a/src/stage1/bigint.cpp
+++ b/src/stage1/bigint.cpp
@@ -313,12 +313,12 @@ void bigint_write_twos_complement(const BigInt *big_int, uint8_t *buf, size_t bi
}
if (digit_index == 0) break;
- digit_index -= 1;
if (digit_index == last_digit_index) {
buf_index += bytes_in_last_digit;
} else {
buf_index += 8;
}
+ digit_index -= 1;
}
} else {
size_t digit_count = (bit_count + 63) / 64;
From 65299c37d1b4b4395616d6f86b5f064000951cf6 Mon Sep 17 00:00:00 2001
From: m
Date: Fri, 11 Feb 2022 20:11:23 +0100
Subject: [PATCH 0131/2031] validate in Windows using VirtualQuery
---
lib/std/c/darwin.zig | 6 ++++
lib/std/c/dragonfly.zig | 6 ++++
lib/std/c/freebsd.zig | 6 ++++
lib/std/c/netbsd.zig | 6 ++++
lib/std/c/openbsd.zig | 6 ++++
lib/std/c/solaris.zig | 6 ++++
lib/std/debug.zig | 56 +++++++++++++++++++++++----------
lib/std/os/linux.zig | 4 +--
lib/std/os/windows.zig | 32 +++++++++++++++++++
lib/std/os/windows/kernel32.zig | 2 ++
10 files changed, 112 insertions(+), 18 deletions(-)
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index f4ca9cd6dd..091f77e937 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -636,6 +636,12 @@ pub const MAP = struct {
pub const FAILED = @intToPtr(*anyopaque, maxInt(usize));
};
+pub const MSF = struct {
+ pub const ASYNC = 1;
+ pub const INVALIDATE = 2;
+ pub const SYNC = 4;
+};
+
pub const SA = struct {
/// take signal on signal stack
pub const ONSTACK = 0x0001;
diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig
index d43c8dd239..75e84f637d 100644
--- a/lib/std/c/dragonfly.zig
+++ b/lib/std/c/dragonfly.zig
@@ -185,6 +185,12 @@ pub const MAP = struct {
pub const SIZEALIGN = 262144;
};
+pub const MSF = struct {
+ pub const ASYNC = 1;
+ pub const INVALIDATE = 2;
+ pub const SYNC = 4;
+};
+
pub const W = struct {
pub const NOHANG = 0x0001;
pub const UNTRACED = 0x0002;
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index a19ecd3bac..7b74c237ce 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -410,6 +410,12 @@ pub const MAP = struct {
pub const @"32BIT" = 0x00080000;
};
+pub const MSF = struct {
+ pub const ASYNC = 1;
+ pub const INVALIDATE = 2;
+ pub const SYNC = 4;
+};
+
pub const W = struct {
pub const NOHANG = 1;
pub const UNTRACED = 2;
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index 4fbc7594b0..6507134d13 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -575,6 +575,12 @@ pub const MAP = struct {
pub const STACK = 0x2000;
};
+pub const MSF = struct {
+ pub const ASYNC = 1;
+ pub const INVALIDATE = 2;
+ pub const SYNC = 4;
+};
+
pub const W = struct {
pub const NOHANG = 0x00000001;
pub const UNTRACED = 0x00000002;
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index 6ba11e8e5a..17187cf203 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -363,6 +363,12 @@ pub const MAP = struct {
pub const CONCEAL = 0x8000;
};
+pub const MSF = struct {
+ pub const ASYNC = 1;
+ pub const INVALIDATE = 2;
+ pub const SYNC = 4;
+};
+
pub const W = struct {
pub const NOHANG = 1;
pub const UNTRACED = 2;
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index 5abb34d2ff..5f44dc5350 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -534,6 +534,12 @@ pub const MAP = struct {
pub const INITDATA = 0x0800;
};
+pub const MSF = struct {
+ pub const ASYNC = 1;
+ pub const INVALIDATE = 2;
+ pub const SYNC = 4;
+};
+
pub const MADV = struct {
/// no further special treatment
pub const NORMAL = 0;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index be47985041..c1571a70c6 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -424,24 +424,48 @@ pub const StackIterator = struct {
return address;
}
- fn isValidMemory(address: u64) bool {
- if (native_os != .windows) {
- var res = true;
- const length = 2 * mem.page_size;
- const aligned_address = address & ~@intCast(u64, (mem.page_size - 1));
- const aligned_memory = @intToPtr([*]align(mem.page_size) u8, aligned_address)[0..length];
+ fn isValidMemory(address: usize) bool {
+ const aligned_address = address & ~@intCast(usize, (mem.page_size - 1));
- os.msync(aligned_memory, os.MSF.ASYNC) catch |err| {
- switch (err) {
- os.MSyncError.UnmappedMemory => {
- res = false;
- },
- else => unreachable,
- }
- };
- return res;
+ // If the address does not span 2 pages, query only the first one
+ const length: usize = if (aligned_address == address) mem.page_size else 2 * mem.page_size;
+
+ const aligned_memory = @intToPtr([*]align(mem.page_size) u8, aligned_address)[0..length];
+
+ if (native_os != .windows) {
+ if (native_os != .wasi) {
+ os.msync(aligned_memory, os.MSF.ASYNC) catch |err| {
+ switch (err) {
+ os.MSyncError.UnmappedMemory => {
+ return false;
+ },
+ else => unreachable,
+ }
+ };
+ }
+
+ return true;
} else {
- // TODO: Using windows memory API check if a page is mapped
+ const w = os.windows;
+ var memory_info: w.MEMORY_BASIC_INFORMATION = undefined;
+ //const memory_info_ptr = @ptrCast(w.PMEMORY_BASIC_INFORMATION, buffer);
+
+ // The only error this function can return is ERROR_INVALID_PARAMETER,
+ // which is returned when an invalid address is supplied.
+ const rc = w.VirtualQuery(aligned_memory.ptr, &memory_info, aligned_memory.len) catch {
+ return false;
+ };
+
+ // Result code has to be bigger than zero (number of bytes written)
+ if (rc == 0) {
+ return false;
+ }
+
+ // Free pages cannot be read, they are unmapped
+ if (memory_info.State == w.MEM_FREE) {
+ return false;
+ }
+
return true;
}
}
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 608db08a9c..b6c8285b52 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -412,8 +412,8 @@ pub const MSF = struct {
pub const SYNC = 4;
};
-pub fn msync(address: [*]const u8, length: usize, flags: u32) usize {
- return syscall3(.msync, @ptrToInt(address), length, flags);
+pub fn msync(address: [*]const u8, length: usize, flags: i32) usize {
+ return syscall3(.msync, @ptrToInt(address), length, @bitCast(u32, flags));
}
pub fn munmap(address: [*]const u8, length: usize) usize {
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index c832149577..31745cfe82 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -1495,6 +1495,19 @@ pub fn VirtualFree(lpAddress: ?LPVOID, dwSize: usize, dwFreeType: DWORD) void {
assert(kernel32.VirtualFree(lpAddress, dwSize, dwFreeType) != 0);
}
+pub const VirtualQueryError = error{Unexpected};
+
+pub fn VirtualQuery(lpAddress: ?LPVOID, lpBuffer: PMEMORY_BASIC_INFORMATION, dwLength: SIZE_T) VirtualQueryError!SIZE_T {
+ const rc = kernel32.VirtualQuery(lpAddress, lpBuffer, dwLength);
+ if (rc == 0) {
+ switch (kernel32.GetLastError()) {
+ else => |err| return unexpectedError(err),
+ }
+ }
+
+ return rc;
+}
+
pub const SetConsoleTextAttributeError = error{Unexpected};
pub fn SetConsoleTextAttribute(hConsoleOutput: HANDLE, wAttributes: WORD) SetConsoleTextAttributeError!void {
@@ -2586,6 +2599,11 @@ pub const CREATE_EVENT_MANUAL_RESET = 0x00000001;
pub const EVENT_ALL_ACCESS = 0x1F0003;
pub const EVENT_MODIFY_STATE = 0x0002;
+// MEMORY_BASIC_INFORMATION.Type flags for VirtualQuery
+pub const MEM_IMAGE = 0x1000000;
+pub const MEM_MAPPED = 0x40000;
+pub const MEM_PRIVATE = 0x20000;
+
pub const PROCESS_INFORMATION = extern struct {
hProcess: HANDLE,
hThread: HANDLE,
@@ -2661,6 +2679,7 @@ pub const HEAP_NO_SERIALIZE = 0x00000001;
// AllocationType values
pub const MEM_COMMIT = 0x1000;
pub const MEM_RESERVE = 0x2000;
+pub const MEM_FREE = 0x10000;
pub const MEM_RESET = 0x80000;
pub const MEM_RESET_UNDO = 0x1000000;
pub const MEM_LARGE_PAGES = 0x20000000;
@@ -2960,6 +2979,19 @@ pub const COINIT = enum(c_int) {
COINIT_SPEED_OVER_MEMORY = 8,
};
+pub const MEMORY_BASIC_INFORMATION = extern struct {
+ BaseAddress: PVOID,
+ AllocationBase: PVOID,
+ AllocationProtect: DWORD,
+ PartitionId: WORD,
+ RegionSize: SIZE_T,
+ State: DWORD,
+ Protect: DWORD,
+ Type: DWORD,
+};
+
+pub const PMEMORY_BASIC_INFORMATION = *MEMORY_BASIC_INFORMATION;
+
/// > The maximum path of 32,767 characters is approximate, because the "\\?\"
/// > prefix may be expanded to a longer string by the system at run time, and
/// > this expansion applies to the total length.
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index b602921648..e947d1e505 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -56,6 +56,7 @@ const LPOVERLAPPED_COMPLETION_ROUTINE = windows.LPOVERLAPPED_COMPLETION_ROUTINE;
const UCHAR = windows.UCHAR;
const FARPROC = windows.FARPROC;
const INIT_ONCE_FN = windows.INIT_ONCE_FN;
+const PMEMORY_BASIC_INFORMATION = windows.PMEMORY_BASIC_INFORMATION;
pub extern "kernel32" fn AddVectoredExceptionHandler(First: c_ulong, Handler: ?VECTORED_EXCEPTION_HANDLER) callconv(WINAPI) ?*anyopaque;
pub extern "kernel32" fn RemoveVectoredExceptionHandler(Handle: HANDLE) callconv(WINAPI) c_ulong;
@@ -245,6 +246,7 @@ pub extern "kernel32" fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: ?*co
pub extern "kernel32" fn VirtualAlloc(lpAddress: ?LPVOID, dwSize: SIZE_T, flAllocationType: DWORD, flProtect: DWORD) callconv(WINAPI) ?LPVOID;
pub extern "kernel32" fn VirtualFree(lpAddress: ?LPVOID, dwSize: SIZE_T, dwFreeType: DWORD) callconv(WINAPI) BOOL;
+pub extern "kernel32" fn VirtualQuery(lpAddress: ?LPVOID, lpBuffer: PMEMORY_BASIC_INFORMATION, dwLength: SIZE_T) callconv(WINAPI) SIZE_T;
pub extern "kernel32" fn LocalFree(hMem: HLOCAL) callconv(WINAPI) ?HLOCAL;
From a5a7f0ff0069d68ad60391dadce52321dd316138 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 11 Feb 2022 15:57:23 -0700
Subject: [PATCH 0132/2031] CI: upgrade x86_64-macos tarball to llvm 13.0.1
---
ci/azure/macos_script | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ci/azure/macos_script b/ci/azure/macos_script
index da0e279500..80fcf430cc 100755
--- a/ci/azure/macos_script
+++ b/ci/azure/macos_script
@@ -9,7 +9,7 @@ ZIGDIR="$(pwd)"
ARCH="x86_64"
TARGET="$ARCH-macos-gnu"
MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.9.0-dev.1249+210ef5af8"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.9.1-dev.90+d2398cf00"
PREFIX="$HOME/$CACHE_BASENAME"
JOBS="-j2"
From 2262640e8bf4cbcc4e6a46405d7a5a20562b8e47 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Fri, 11 Feb 2022 19:06:46 +0100
Subject: [PATCH 0133/2031] stage2 ARM: lower const slices
Follow-up to e1a535360fb9ed08fc48018571b9702ab12a5876 for ARM
This also fixes some stack offset calculation bugs
---
src/arch/arm/CodeGen.zig | 207 ++++++++++++++++++++++-----------------
test/behavior/basic.zig | 3 -
test/behavior/slice.zig | 17 ----
3 files changed, 119 insertions(+), 108 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 2c60027d97..e211f9d7bc 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1170,10 +1170,10 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
.dead, .unreach => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off };
+ break :result MCValue{ .stack_argument_offset = off + 4 };
},
.stack_offset => |off| {
- break :result MCValue{ .stack_offset = off };
+ break :result MCValue{ .stack_offset = off + 4 };
},
.memory => |addr| {
break :result MCValue{ .memory = addr };
@@ -1192,10 +1192,10 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
.dead, .unreach => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off + 4 };
+ break :result MCValue{ .stack_argument_offset = off };
},
.stack_offset => |off| {
- break :result MCValue{ .stack_offset = off + 4 };
+ break :result MCValue{ .stack_offset = off };
},
.memory => |addr| {
break :result MCValue{ .memory = addr + 4 };
@@ -1260,7 +1260,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});
const base_mcv: MCValue = switch (slice_mcv) {
- .stack_offset => .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, slice_mcv) },
+ .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off + 4 }) },
else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
};
self.register_manager.freezeRegs(&.{base_mcv.register});
@@ -1471,36 +1471,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
- } else if (elem_size == 8) {
- // TODO generalize this: maybe add a
- // genArmMemcpy function which manually copies
- // data if the size is below a certain
- // threshold and calls "memcpy" if the size is
- // larger
-
- const usize_ty = Type.initTag(.usize);
- const tmp_regs = try self.register_manager.allocRegs(2, .{ null, null });
- self.register_manager.freezeRegs(&tmp_regs);
- defer self.register_manager.unfreezeRegs(&tmp_regs);
-
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = tmp_regs[0],
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = tmp_regs[1],
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.imm(4) },
- } },
- });
- try self.genSetStack(usize_ty, off, MCValue{ .register = tmp_regs[0] });
- try self.genSetStack(usize_ty, off + 4, MCValue{ .register = tmp_regs[1] });
} else {
// TODO optimize the register allocation
const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
@@ -1677,7 +1647,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size };
},
- .stack_argument_offset => {
+ else => {
const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
.immediate = struct_field_offset,
});
@@ -1699,7 +1669,6 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
);
break :result MCValue{ .register = dst_reg };
},
- else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
}
};
}
@@ -3200,9 +3169,9 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
- .ptr_embedded_in_code => unreachable,
.unreach, .none => return, // Nothing to do.
.undef => {
if (!self.wantSafety())
@@ -3215,23 +3184,16 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
else => return self.fail("TODO implement memset", .{}),
}
},
- .ptr_stack_offset => {
- const reg = try self.copyToTmpRegister(ty, mcv);
- return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
- },
.compare_flags_unsigned,
.compare_flags_signed,
.immediate,
+ .ptr_stack_offset,
+ .ptr_embedded_in_code,
=> {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
},
- .embedded_in_code => |code_offset| {
- _ = code_offset;
- return self.fail("TODO implement set stack variable from embedded_in_code", .{});
- },
.register => |reg| {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const adj_off = stack_offset + abi_size;
switch (abi_size) {
@@ -3279,24 +3241,23 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
}
},
.memory,
+ .embedded_in_code,
.stack_argument_offset,
+ .stack_offset,
=> {
- if (ty.abiSize(self.target.*) <= 4) {
- const reg = try self.copyToTmpRegister(ty, mcv);
- return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
- } else {
- return self.fail("TODO implement memcpy", .{});
+ switch (mcv) {
+ .stack_offset => |off| {
+ if (stack_offset == off)
+ return; // Copy stack variable to itself; nothing to do.
+ },
+ else => {},
}
- },
- .stack_offset => |off| {
- if (stack_offset == off)
- return; // Copy stack variable to itself; nothing to do.
- if (ty.abiSize(self.target.*) <= 4) {
+ if (abi_size <= 4) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- // TODO optimize the register allocation
+ // TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null });
const src_reg = regs[0];
const dst_reg = regs[1];
@@ -3304,22 +3265,31 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const count_reg = regs[3];
const tmp_reg = regs[4];
- // sub src_reg, fp, #off
- const adj_src_offset = off + @intCast(u32, ty.abiSize(self.target.*));
- const src_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_src_offset)) |x| x else {
- return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
- };
- _ = try self.addInst(.{
- .tag = .sub,
- .data = .{ .rr_op = .{
- .rd = src_reg,
- .rn = .fp,
- .op = src_offset_op,
- } },
- });
+ switch (mcv) {
+ .stack_offset => |off| {
+ // sub src_reg, fp, #off
+ const adj_src_offset = off + @intCast(u32, abi_size);
+ const src_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_src_offset)) |x| x else {
+ return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
+ };
+ _ = try self.addInst(.{
+ .tag = .sub,
+ .data = .{ .rr_op = .{
+ .rd = src_reg,
+ .rn = .fp,
+ .op = src_offset_op,
+ } },
+ });
+ },
+ .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .embedded_in_code,
+ .stack_argument_offset,
+ => return self.fail("TODO genSetStack with src={}", .{mcv}),
+ else => unreachable,
+ }
// sub dst_reg, fp, #stack_offset
- const adj_dst_offset = stack_offset + @intCast(u32, ty.abiSize(self.target.*));
+ const adj_dst_offset = stack_offset + abi_size;
const dst_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_dst_offset)) |x| x else {
return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
};
@@ -3332,9 +3302,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
} },
});
- // mov len, #elem_size
- const elem_size = @intCast(u32, ty.abiSize(self.target.*));
- const len_op: Instruction.Operand = if (Instruction.Operand.fromU32(elem_size)) |x| x else {
+ // mov len, #abi_size
+ const len_op: Instruction.Operand = if (Instruction.Operand.fromU32(abi_size)) |x| x else {
return self.fail("TODO load: set reg to elem_size with all possible sizes", .{});
};
_ = try self.addInst(.{
@@ -3619,6 +3588,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -3634,7 +3604,6 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
}
},
.register => |reg| {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const adj_off = stack_offset - abi_size;
switch (abi_size) {
@@ -3675,28 +3644,86 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
}
},
- .immediate,
- .compare_flags_signed,
- .compare_flags_unsigned,
.stack_offset,
.memory,
.stack_argument_offset,
.embedded_in_code,
=> {
- if (ty.abiSize(self.target.*) <= 4) {
+ if (abi_size <= 4) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
} else {
- return self.fail("TODO implement memcpy", .{});
+ // TODO call extern memcpy
+ const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null });
+ const src_reg = regs[0];
+ const dst_reg = regs[1];
+ const len_reg = regs[2];
+ const count_reg = regs[3];
+ const tmp_reg = regs[4];
+
+ switch (mcv) {
+ .stack_offset => |off| {
+ // sub src_reg, fp, #off
+ const adj_src_offset = off + abi_size;
+ const src_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_src_offset)) |x| x else {
+ return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
+ };
+ _ = try self.addInst(.{
+ .tag = .sub,
+ .data = .{ .rr_op = .{
+ .rd = src_reg,
+ .rn = .fp,
+ .op = src_offset_op,
+ } },
+ });
+ },
+ .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .stack_argument_offset,
+ .embedded_in_code,
+ => return self.fail("TODO genSetStackArgument src={}", .{mcv}),
+ else => unreachable,
+ }
+
+ // add dst_reg, sp, #stack_offset
+ const adj_dst_offset = stack_offset - abi_size;
+ const dst_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_dst_offset)) |x| x else {
+ return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
+ };
+ _ = try self.addInst(.{
+ .tag = .add,
+ .data = .{ .rr_op = .{
+ .rd = dst_reg,
+ .rn = .sp,
+ .op = dst_offset_op,
+ } },
+ });
+
+ // mov len, #abi_size
+ const len_op: Instruction.Operand = if (Instruction.Operand.fromU32(abi_size)) |x| x else {
+ return self.fail("TODO load: set reg to elem_size with all possible sizes", .{});
+ };
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .data = .{ .rr_op = .{
+ .rd = len_reg,
+ .rn = .r0,
+ .op = len_op,
+ } },
+ });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
- .ptr_stack_offset => {
+ .compare_flags_unsigned,
+ .compare_flags_signed,
+ .immediate,
+ .ptr_stack_offset,
+ .ptr_embedded_in_code,
+ => {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
},
- .ptr_embedded_in_code => {
- return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
- },
}
}
@@ -4114,9 +4141,13 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
for (param_types) |ty, i| {
- stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, ty.abiAlignment(self.target.*));
- result.args[i] = .{ .stack_argument_offset = stack_offset };
- stack_offset += @intCast(u32, ty.abiSize(self.target.*));
+ if (ty.abiSize(self.target.*) > 0) {
+ stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, ty.abiAlignment(self.target.*));
+ result.args[i] = .{ .stack_argument_offset = stack_offset };
+ stack_offset += @intCast(u32, ty.abiSize(self.target.*));
+ } else {
+ result.args[i] = .{ .none = {} };
+ }
}
result.stack_byte_count = stack_offset;
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 5d7bb3b9a7..18a24f9b3a 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -216,7 +216,6 @@ test "compile time global reinterpret" {
}
test "cast undefined" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const array: [100]u8 = undefined;
@@ -678,7 +677,6 @@ test "string concatenation" {
}
test "thread local variable" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
@@ -704,7 +702,6 @@ fn maybe(x: bool) anyerror!?u32 {
}
test "pointer to thread local array" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 327b8f4f76..badaf7ef03 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -48,7 +48,6 @@ test "slicing" {
test "const slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
const a = "1234567890";
@@ -61,7 +60,6 @@ test "const slice" {
test "comptime slice of undefined pointer of length 0" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const slice1 = @as([*]i32, undefined)[0..0];
try expect(slice1.len == 0);
@@ -83,7 +81,6 @@ fn assertLenIsZero(msg: []const u8) !void {
test "access len index of sentinel-terminated slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -99,7 +96,6 @@ test "access len index of sentinel-terminated slice" {
test "comptime slice of slice preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var buff: [10]u8 = undefined;
@@ -110,7 +106,6 @@ test "comptime slice of slice preserves comptime var" {
test "slice of type" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var types_array = [_]type{ i32, f64, type };
@@ -151,7 +146,6 @@ fn memFree(comptime T: type, memory: []T) void {
test "slice of hardcoded address to pointer" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -168,7 +162,6 @@ test "slice of hardcoded address to pointer" {
test "comptime slice of pointer preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var buff: [10]u8 = undefined;
@@ -180,7 +173,6 @@ test "comptime slice of pointer preserves comptime var" {
test "comptime pointer cast array and then slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
@@ -239,7 +231,6 @@ test "slice string literal has correct type" {
test "result location zero sized array inside struct field implicit cast to slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const E = struct {
entries: []u32,
@@ -287,8 +278,6 @@ test "C pointer slice access" {
}
test "comptime slices are disambiguated" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-
try expect(sliceSum(&[_]u8{ 1, 2 }) == 3);
try expect(sliceSum(&[_]u8{ 3, 4 }) == 7);
}
@@ -343,7 +332,6 @@ test "obtaining a null terminated slice" {
test "empty array to slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -380,7 +368,6 @@ test "@ptrCast slice to pointer" {
test "slice syntax resulting in pointer-to-array" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -529,7 +516,6 @@ test "slice syntax resulting in pointer-to-array" {
test "type coercion of pointer to anon struct literal to pointer to slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
const U = union {
@@ -563,7 +549,6 @@ test "type coercion of pointer to anon struct literal to pointer to slice" {
test "array concat of slices gives slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var a: []const u8 = "aoeu";
@@ -575,7 +560,6 @@ test "array concat of slices gives slice" {
test "slice bounds in comptime concatenation" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const bs = comptime blk: {
const b = "........1........";
@@ -592,7 +576,6 @@ test "slice bounds in comptime concatenation" {
test "slice sentinel access at comptime" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
{
const str0 = &[_:0]u8{ '1', '2', '3' };
From 3fdbc3bba859596d70251d05ccafd2ad1bfc962d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 12 Feb 2022 01:06:20 -0700
Subject: [PATCH 0134/2031] CI: upgrade x86_64-macos tarball to llvm 13.0.1
---
ci/azure/macos_arm64_script | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ci/azure/macos_arm64_script b/ci/azure/macos_arm64_script
index b8d1437e78..835ee59c90 100755
--- a/ci/azure/macos_arm64_script
+++ b/ci/azure/macos_arm64_script
@@ -10,7 +10,7 @@ ZIGDIR="$(pwd)"
HOST_ARCH="x86_64"
HOST_TARGET="$HOST_ARCH-macos-gnu"
HOST_MCPU="baseline"
-HOST_CACHE_BASENAME="zig+llvm+lld+clang-$HOST_TARGET-0.9.0-dev.1249+210ef5af8"
+HOST_CACHE_BASENAME="zig+llvm+lld+clang-$HOST_TARGET-0.9.1-dev.90+d2398cf00"
HOST_PREFIX="$HOME/$HOST_CACHE_BASENAME"
ARCH="aarch64"
From 1072d8a0658d76415124ef6aac6842283316f243 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 12 Feb 2022 11:01:10 +0100
Subject: [PATCH 0135/2031] CI: upgrade both host and target tarballs to llvm
13.0.1 x86_64-macos
---
ci/azure/macos_arm64_script | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ci/azure/macos_arm64_script b/ci/azure/macos_arm64_script
index 835ee59c90..a37faba001 100755
--- a/ci/azure/macos_arm64_script
+++ b/ci/azure/macos_arm64_script
@@ -16,7 +16,7 @@ HOST_PREFIX="$HOME/$HOST_CACHE_BASENAME"
ARCH="aarch64"
TARGET="$ARCH-macos-gnu"
MCPU="apple_a14"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.9.0-dev.1249+210ef5af8"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.9.1-dev.90+d2398cf00"
PREFIX="$HOME/$CACHE_BASENAME"
JOBS="-j2"
From beb275b371cd50ee1d57528d2792f2f267e6013d Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 12 Feb 2022 11:17:41 +0100
Subject: [PATCH 0136/2031] Revert "CI: upgrade both host and target tarballs
to llvm 13.0.1 x86_64-macos"
This reverts commit 1072d8a0658d76415124ef6aac6842283316f243.
---
ci/azure/macos_arm64_script | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ci/azure/macos_arm64_script b/ci/azure/macos_arm64_script
index a37faba001..835ee59c90 100755
--- a/ci/azure/macos_arm64_script
+++ b/ci/azure/macos_arm64_script
@@ -16,7 +16,7 @@ HOST_PREFIX="$HOME/$HOST_CACHE_BASENAME"
ARCH="aarch64"
TARGET="$ARCH-macos-gnu"
MCPU="apple_a14"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.9.1-dev.90+d2398cf00"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.9.0-dev.1249+210ef5af8"
PREFIX="$HOME/$CACHE_BASENAME"
JOBS="-j2"
From f293fbbeaf6c48e6ce1410743181f89f359eb697 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 15:42:57 -0700
Subject: [PATCH 0137/2031] stage2: LLVM backend: adjust replaceAllUsesWith
usage
replaceAllUsesWith requires the type to be unchanged. So we bitcast
the new global to the old type and use that as the thing to replace
old uses.
Fixes an LLVM assertion found while troubleshooting #10837.
---
src/codegen/llvm.zig | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index d85a16d16f..425808efb1 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -661,7 +661,11 @@ pub const DeclGen = struct {
new_global.setUnnamedAddr(global.getUnnamedAddress());
new_global.setAlignment(global.getAlignment());
new_global.setInitializer(llvm_init);
- global.replaceAllUsesWith(new_global);
+ // replaceAllUsesWith requires the type to be unchanged. So we bitcast
+ // the new global to the old type and use that as the thing to replace
+ // old uses.
+ const new_global_ptr = new_global.constBitCast(global.typeOf());
+ global.replaceAllUsesWith(new_global_ptr);
dg.object.decl_map.putAssumeCapacity(decl, new_global);
new_global.takeName(global);
global.deleteGlobal();
From 166db1a3ed7eca9b04b0626eaea8de0634ab9667 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 17:36:31 -0700
Subject: [PATCH 0138/2031] stage1: fix f80 size and alignment on x86 and arm
* F80Repr extern struct needs no explicit padding; let's match the
target padding.
* stage2: fix lowering of f80 constants.
* stage1: decide ABI size and alignment of f80 based on alignment of
u64. x86 has alignof u64 equal to 4 but arm has it as 8.
* stage2: fix Value.floatReadFromMemory to use F80Repr
---
lib/std/math.zig | 7 ++-----
src/codegen/llvm.zig | 34 ++++++++++++++++++++++------------
src/stage1/codegen.cpp | 22 +++++++++++++++++-----
src/value.zig | 30 ++++++++++++++++++++++++++----
4 files changed, 67 insertions(+), 26 deletions(-)
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 6802d420fd..8398842e28 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -36,7 +36,6 @@ pub const sqrt2 = 1.414213562373095048801688724209698079;
/// 1/sqrt(2)
pub const sqrt1_2 = 0.707106781186547524400844362104849039;
-// From a small c++ [program using boost float128](https://github.com/winksaville/cpp_boost_float128)
pub const f128_true_min = @bitCast(f128, @as(u128, 0x00000000000000000000000000000001));
pub const f128_min = @bitCast(f128, @as(u128, 0x00010000000000000000000000000000));
pub const f128_max = @bitCast(f128, @as(u128, 0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF));
@@ -44,12 +43,10 @@ pub const f128_epsilon = @bitCast(f128, @as(u128, 0x3F8F000000000000000000000000
pub const f128_toint = 1.0 / f128_epsilon;
pub const F80Repr = if (@import("builtin").cpu.arch.endian() == .Little) extern struct {
- fraction: u64,
+ fraction: u64 align(@alignOf(f80)),
exp: u16,
- _pad: u32 = undefined,
} else extern struct {
- exp: u16,
- _pad: u32 = undefined, // TODO verify compatibility with hardware
+ exp: u16 align(@alignOf(f80)),
fraction: u64,
};
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 425808efb1..54468162ad 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1257,19 +1257,29 @@ pub const DeclGen = struct {
},
.Float => {
const llvm_ty = try dg.llvmType(tv.ty);
- if (tv.ty.floatBits(dg.module.getTarget()) <= 64) {
- return llvm_ty.constReal(tv.val.toFloat(f64));
+ switch (tv.ty.floatBits(dg.module.getTarget())) {
+ 16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)),
+ 80 => {
+ const float = tv.val.toFloat(f80);
+ const repr = @ptrCast(*const std.math.F80Repr, &float);
+ const llvm_i80 = dg.context.intType(80);
+ var x = llvm_i80.constInt(repr.exp, .False);
+ x = x.constShl(llvm_i80.constInt(64, .False));
+ x = x.constOr(llvm_i80.constInt(repr.fraction, .False));
+ return x.constBitCast(llvm_ty);
+ },
+ 128 => {
+ var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128));
+ // LLVM seems to require that the lower half of the f128 be placed first
+ // in the buffer.
+ if (native_endian == .Big) {
+ std.mem.swap(u64, &buf[0], &buf[1]);
+ }
+ const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
+ return int.constBitCast(llvm_ty);
+ },
+ else => unreachable,
}
-
- var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128));
- // LLVM seems to require that the lower half of the f128 be placed first
- // in the buffer.
- if (native_endian == .Big) {
- std.mem.swap(u64, &buf[0], &buf[1]);
- }
-
- const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
- return int.constBitCast(llvm_ty);
},
.Pointer => switch (tv.val.tag()) {
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index c06f71e834..7b0bcbe2f5 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -9429,17 +9429,29 @@ static void define_builtin_types(CodeGen *g) {
{
ZigType *entry = new_type_table_entry(ZigTypeIdFloat);
+ unsigned u64_alignment = LLVMABIAlignmentOfType(g->target_data_ref, LLVMInt64Type());
+
+ if (u64_alignment >= 8) {
+ entry->size_in_bits = 128;
+ entry->abi_size = 16;
+ entry->abi_align = 16;
+ } else if (u64_alignment >= 4) {
+ entry->size_in_bits = 96;
+ entry->abi_size = 12;
+ entry->abi_align = 4;
+ } else {
+ entry->size_in_bits = 80;
+ entry->abi_size = 10;
+ entry->abi_align = 2;
+ }
if (target_has_f80(g->zig_target)) {
entry->llvm_type = LLVMX86FP80Type();
} else {
- // We use i128 here instead of x86_fp80 because on targets such as arm,
+ // We use an int here instead of x86_fp80 because on targets such as arm,
// LLVM will give "ERROR: Cannot select" for any instructions involving
// the x86_fp80 type.
- entry->llvm_type = get_int_type(g, false, 128)->llvm_type;
+ entry->llvm_type = get_int_type(g, false, entry->size_in_bits)->llvm_type;
}
- entry->size_in_bits = 8 * 16;
- entry->abi_size = 16; // matches LLVMABISizeOfType(LLVMX86FP80Type())
- entry->abi_align = 16; // matches LLVMABIAlignmentOfType(LLVMX86FP80Type())
buf_init_from_str(&entry->name, "f80");
entry->data.floating.bit_count = 80;
diff --git a/src/value.zig b/src/value.zig
index 33a75e08bb..aefb0a3e20 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1122,10 +1122,32 @@ pub const Value = extern union {
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
if (F == f80) {
- // TODO: use std.math.F80Repr?
- const int = std.mem.readInt(u128, buffer[0..16], target.cpu.arch.endian());
- // TODO shouldn't this be a bitcast from u80 to f80 instead of u128 to f80?
- return @bitCast(F, int);
+ switch (target.cpu.arch.endian()) {
+ .Little => {
+ const TargetF80Repr = extern struct {
+ fraction: u64,
+ exp: u16,
+ };
+ const target_repr = @ptrCast(*align(1) const TargetF80Repr, buffer.ptr);
+ const real_repr: std.math.F80Repr = .{
+ .fraction = target_repr.fraction,
+ .exp = target_repr.exp,
+ };
+ return @ptrCast(*const f80, &real_repr).*;
+ },
+ .Big => {
+ const TargetF80Repr = extern struct {
+ exp: u16,
+ fraction: u64,
+ };
+ const target_repr = @ptrCast(*align(1) const TargetF80Repr, buffer.ptr);
+ const real_repr: std.math.F80Repr = .{
+ .fraction = target_repr.fraction,
+ .exp = target_repr.exp,
+ };
+ return @ptrCast(*const f80, &real_repr).*;
+ },
+ }
}
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
From 1c23321d03a48558b02c2faf818b82811b8ab11d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 21:47:18 -0700
Subject: [PATCH 0139/2031] stage1: fix softfloat not getting correct
endianness
needed to include platform.h in more places so that LITTLEENDIAN will be
defined appropriately.
closes #10860
---
CMakeLists.txt | 3 +++
deps/SoftFloat-3e/source/include/primitiveTypes.h | 1 +
deps/SoftFloat-3e/source/include/softfloat_types.h | 1 +
3 files changed, 5 insertions(+)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8fd1960518..a6edfa04ac 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -791,6 +791,9 @@ add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES})
set_target_properties(opt_c_util PROPERTIES
COMPILE_FLAGS "${OPTIMIZED_C_FLAGS}"
)
+target_include_directories(opt_c_util PRIVATE
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e-prebuilt"
+)
add_library(zigstage1 STATIC ${STAGE1_SOURCES})
set_target_properties(zigstage1 PROPERTIES
diff --git a/deps/SoftFloat-3e/source/include/primitiveTypes.h b/deps/SoftFloat-3e/source/include/primitiveTypes.h
index a4a6dd11c6..4407f5e7cb 100644
--- a/deps/SoftFloat-3e/source/include/primitiveTypes.h
+++ b/deps/SoftFloat-3e/source/include/primitiveTypes.h
@@ -37,6 +37,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef primitiveTypes_h
#define primitiveTypes_h 1
+#include "platform.h"
#include
#ifdef SOFTFLOAT_FAST_INT64
diff --git a/deps/SoftFloat-3e/source/include/softfloat_types.h b/deps/SoftFloat-3e/source/include/softfloat_types.h
index bc30e31440..27507741af 100644
--- a/deps/SoftFloat-3e/source/include/softfloat_types.h
+++ b/deps/SoftFloat-3e/source/include/softfloat_types.h
@@ -37,6 +37,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef softfloat_types_h
#define softfloat_types_h 1
+#include "platform.h"
#include
/*----------------------------------------------------------------------------
From a024aff9324e827d6595e44f922d87f8ed2dbd0d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 22:06:43 -0700
Subject: [PATCH 0140/2031] make f80 less hacky; lower as u80 on non-x86
Get rid of `std.math.F80Repr`. Instead of trying to match the memory
layout of f80, we treat it as a value, same as the other floating point
types. The functions `make_f80` and `break_f80` are introduced to
compose an f80 value out of its parts, and the inverse operation.
stage2 LLVM backend: fix pointer to zero length array tripping LLVM
assertion. It now checks for when the element type is a zero-bit type
and lowers such thing the same way that pointers to other zero-bit types
are lowered.
Both stage1 and stage2 LLVM backends are adjusted so that f80 is lowered
as x86_fp80 on x86_64 and i386 architectures, and identical to a u80 on
others. LLVM constants are lowered in a less hacky way now that #10860
is fixed, by using the expression `(exp << 64) | fraction` using llvm
constants.
Sema is improved to handle c_longdouble by recursively handling it
correctly for whatever the float bit width is. In both stage1 and
stage2.
---
lib/std/math.zig | 40 ++++++++-----
lib/std/special/compiler_rt/addXf3.zig | 18 +++---
lib/std/special/compiler_rt/compareXf2.zig | 4 +-
lib/std/special/compiler_rt/extend_f80.zig | 6 +-
lib/std/special/compiler_rt/trunc_f80.zig | 6 +-
src/codegen/llvm.zig | 41 ++++++++-----
src/stage1/codegen.cpp | 68 +++++++++++-----------
src/type.zig | 55 +++++++++++++++--
src/value.zig | 67 ++++++++++++---------
test/behavior/floatop.zig | 5 +-
10 files changed, 200 insertions(+), 110 deletions(-)
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 8398842e28..4b8bcf2287 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -42,19 +42,11 @@ pub const f128_max = @bitCast(f128, @as(u128, 0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF
pub const f128_epsilon = @bitCast(f128, @as(u128, 0x3F8F0000000000000000000000000000));
pub const f128_toint = 1.0 / f128_epsilon;
-pub const F80Repr = if (@import("builtin").cpu.arch.endian() == .Little) extern struct {
- fraction: u64 align(@alignOf(f80)),
- exp: u16,
-} else extern struct {
- exp: u16 align(@alignOf(f80)),
- fraction: u64,
-};
-
// float.h details
-pub const f80_true_min = @ptrCast(*const f80, &F80Repr{ .fraction = 1, .exp = 0 }).*;
-pub const f80_min = @ptrCast(*const f80, &F80Repr{ .fraction = 0x8000000000000000, .exp = 1 }).*;
-pub const f80_max = @ptrCast(*const f80, &F80Repr{ .fraction = 0xFFFFFFFFFFFFFFFF, .exp = 0x7FFE }).*;
-pub const f80_epsilon = @ptrCast(*const f80, &F80Repr{ .fraction = 0x8000000000000000, .exp = 0x3FC0 }).*;
+pub const f80_true_min = make_f80(.{ .fraction = 1, .exp = 0 });
+pub const f80_min = make_f80(.{ .fraction = 0x8000000000000000, .exp = 1 });
+pub const f80_max = make_f80(.{ .fraction = 0xFFFFFFFFFFFFFFFF, .exp = 0x7FFE });
+pub const f80_epsilon = make_f80(.{ .fraction = 0x8000000000000000, .exp = 0x3FC0 });
pub const f80_toint = 1.0 / f80_epsilon;
pub const f64_true_min = 4.94065645841246544177e-324;
@@ -104,9 +96,9 @@ pub const qnan_f64 = @bitCast(f64, qnan_u64);
pub const inf_u64 = @as(u64, 0x7FF << 52);
pub const inf_f64 = @bitCast(f64, inf_u64);
-pub const inf_f80 = @ptrCast(*const f80, &F80Repr{ .fraction = 0x8000000000000000, .exp = 0x7fff }).*;
-pub const nan_f80 = @ptrCast(*const f80, &F80Repr{ .fraction = 0xA000000000000000, .exp = 0x7fff }).*;
-pub const qnan_f80 = @ptrCast(*const f80, &F80Repr{ .fraction = 0xC000000000000000, .exp = 0x7fff }).*;
+pub const inf_f80 = make_f80(F80{ .fraction = 0x8000000000000000, .exp = 0x7fff });
+pub const nan_f80 = make_f80(F80{ .fraction = 0xA000000000000000, .exp = 0x7fff });
+pub const qnan_f80 = make_f80(F80{ .fraction = 0xC000000000000000, .exp = 0x7fff });
pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001);
pub const nan_f128 = @bitCast(f128, nan_u128);
@@ -1501,3 +1493,21 @@ test "boolMask" {
pub fn comptimeMod(num: anytype, denom: comptime_int) IntFittingRange(0, denom - 1) {
return @intCast(IntFittingRange(0, denom - 1), @mod(num, denom));
}
+
+pub const F80 = struct {
+ fraction: u64,
+ exp: u16,
+};
+
+pub fn make_f80(repr: F80) f80 {
+ const int = (@as(u80, repr.exp) << 64) | repr.fraction;
+ return @bitCast(f80, int);
+}
+
+pub fn break_f80(x: f80) F80 {
+ const int = @bitCast(u80, x);
+ return .{
+ .fraction = @truncate(u64, int),
+ .exp = @truncate(u16, int >> 64),
+ };
+}
diff --git a/lib/std/special/compiler_rt/addXf3.zig b/lib/std/special/compiler_rt/addXf3.zig
index 1339cc340d..13758afce7 100644
--- a/lib/std/special/compiler_rt/addXf3.zig
+++ b/lib/std/special/compiler_rt/addXf3.zig
@@ -232,8 +232,8 @@ fn normalize_f80(exp: *i32, significand: *u80) void {
}
pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
- var a_rep align(16) = @ptrCast(*const std.math.F80Repr, &a).*;
- var b_rep align(16) = @ptrCast(*const std.math.F80Repr, &b).*;
+ var a_rep = std.math.break_f80(a);
+ var b_rep = std.math.break_f80(b);
var a_exp: i32 = a_rep.exp & 0x7FFF;
var b_exp: i32 = b_rep.exp & 0x7FFF;
@@ -257,7 +257,7 @@ pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
std.debug.assert(a_rep.fraction & significand_mask != 0);
// NaN + anything = qNaN
a_rep.fraction |= qnan_bit;
- return @ptrCast(*const f80, &a_rep).*;
+ return std.math.make_f80(a_rep);
}
}
if (b_exp == max_exp) {
@@ -268,7 +268,7 @@ pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
std.debug.assert(b_rep.fraction & significand_mask != 0);
// anything + NaN = qNaN
b_rep.fraction |= qnan_bit;
- return @ptrCast(*const f80, &b_rep).*;
+ return std.math.make_f80(b_rep);
}
}
@@ -279,7 +279,7 @@ pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
if (b_zero) {
// but we need to get the sign right for zero + zero
a_rep.exp &= b_rep.exp;
- return @ptrCast(*const f80, &a_rep).*;
+ return std.math.make_f80(a_rep);
} else {
return b;
}
@@ -359,7 +359,7 @@ pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
if (a_exp >= max_exp) {
a_rep.exp = max_exp | result_sign;
a_rep.fraction = int_bit; // integer bit is set for +/-inf
- return @ptrCast(*const f80, &a_rep).*;
+ return std.math.make_f80(a_rep);
}
if (a_exp <= 0) {
@@ -387,13 +387,13 @@ pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
a_rep.fraction = @truncate(u64, a_int);
a_rep.exp = @truncate(u16, a_int >> significand_bits);
- return @ptrCast(*const f80, &a_rep).*;
+ return std.math.make_f80(a_rep);
}
pub fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
- var b_rep align(16) = @ptrCast(*const std.math.F80Repr, &b).*;
+ var b_rep = std.math.break_f80(b);
b_rep.exp ^= 0x8000;
- return __addxf3(a, @ptrCast(*const f80, &b_rep).*);
+ return __addxf3(a, std.math.make_f80(b_rep));
}
test {
diff --git a/lib/std/special/compiler_rt/compareXf2.zig b/lib/std/special/compiler_rt/compareXf2.zig
index 36f6f5f1c1..9640298f8f 100644
--- a/lib/std/special/compiler_rt/compareXf2.zig
+++ b/lib/std/special/compiler_rt/compareXf2.zig
@@ -147,8 +147,8 @@ pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
// Comparison between f80
pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
- const a_rep = @ptrCast(*const std.math.F80Repr, &a).*;
- const b_rep = @ptrCast(*const std.math.F80Repr, &b).*;
+ const a_rep = std.math.break_f80(a);
+ const b_rep = std.math.break_f80(b);
const sig_bits = std.math.floatMantissaBits(f80);
const int_bit = 0x8000000000000000;
const sign_bit = 0x8000;
diff --git a/lib/std/special/compiler_rt/extend_f80.zig b/lib/std/special/compiler_rt/extend_f80.zig
index 29ba8560ce..4686421db0 100644
--- a/lib/std/special/compiler_rt/extend_f80.zig
+++ b/lib/std/special/compiler_rt/extend_f80.zig
@@ -41,7 +41,7 @@ inline fn extendF80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(s
const src_qnan = 1 << (src_sig_bits - 1);
const src_nan_code = src_qnan - 1;
- var dst: std.math.F80Repr align(16) = undefined;
+ var dst: std.math.F80 = undefined;
// Break a into a sign and representation of the absolute value
const a_abs = a & src_abs_mask;
@@ -83,7 +83,7 @@ inline fn extendF80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(s
}
dst.exp |= sign;
- return @ptrCast(*const f80, &dst).*;
+ return std.math.make_f80(dst);
}
pub fn __extendxftf2(a: f80) callconv(.C) f128 {
@@ -99,7 +99,7 @@ pub fn __extendxftf2(a: f80) callconv(.C) f128 {
const dst_min_normal = @as(u128, 1) << dst_sig_bits;
// Break a into a sign and representation of the absolute value
- var a_rep = @ptrCast(*const std.math.F80Repr, &a).*;
+ var a_rep = std.math.break_f80(a);
const sign = a_rep.exp & 0x8000;
a_rep.exp &= 0x7FFF;
var abs_result: u128 = undefined;
diff --git a/lib/std/special/compiler_rt/trunc_f80.zig b/lib/std/special/compiler_rt/trunc_f80.zig
index 567d03be63..19e8d44b86 100644
--- a/lib/std/special/compiler_rt/trunc_f80.zig
+++ b/lib/std/special/compiler_rt/trunc_f80.zig
@@ -42,7 +42,7 @@ inline fn trunc(comptime dst_t: type, a: f80) dst_t {
const dst_nan_mask = dst_qnan - 1;
// Break a into a sign and representation of the absolute value
- var a_rep = @ptrCast(*const std.math.F80Repr, &a).*;
+ var a_rep = std.math.break_f80(a);
const sign = a_rep.exp & 0x8000;
a_rep.exp &= 0x7FFF;
a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
@@ -125,7 +125,7 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
const a_abs = a_rep & src_abs_mask;
const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0;
- var res: std.math.F80Repr align(16) = undefined;
+ var res: std.math.F80 = undefined;
if (a_abs > src_inf) {
// a is NaN.
@@ -155,5 +155,5 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
}
res.exp |= sign;
- return @ptrCast(*const f80, &res).*;
+ return std.math.make_f80(res);
}
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 54468162ad..b2adb898d3 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -824,23 +824,24 @@ pub const DeclGen = struct {
fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type {
const gpa = dg.gpa;
+ const target = dg.module.getTarget();
switch (t.zigTypeTag()) {
.Void, .NoReturn => return dg.context.voidType(),
.Int => {
- const info = t.intInfo(dg.module.getTarget());
+ const info = t.intInfo(target);
return dg.context.intType(info.bits);
},
.Enum => {
var buffer: Type.Payload.Bits = undefined;
const int_ty = t.intTagType(&buffer);
- const bit_count = int_ty.intInfo(dg.module.getTarget()).bits;
+ const bit_count = int_ty.intInfo(target).bits;
return dg.context.intType(bit_count);
},
- .Float => switch (t.floatBits(dg.module.getTarget())) {
+ .Float => switch (t.floatBits(target)) {
16 => return dg.context.halfType(),
32 => return dg.context.floatType(),
64 => return dg.context.doubleType(),
- 80 => return dg.context.x86FP80Type(),
+ 80 => return if (backendSupportsF80(target)) dg.context.x86FP80Type() else dg.context.intType(80),
128 => return dg.context.fp128Type(),
else => unreachable,
},
@@ -859,7 +860,8 @@ pub const DeclGen = struct {
const llvm_addrspace = dg.llvmAddressSpace(t.ptrAddressSpace());
const elem_ty = t.childType();
const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
- .Opaque, .Array, .Fn => true,
+ .Opaque, .Fn => true,
+ .Array => elem_ty.childType().hasRuntimeBits(),
else => elem_ty.hasRuntimeBits(),
};
const llvm_elem_ty = if (lower_elem_ty)
@@ -889,9 +891,11 @@ pub const DeclGen = struct {
else => unreachable,
},
.Array => {
- const elem_type = try dg.llvmType(t.childType());
+ const elem_ty = t.childType();
+ assert(elem_ty.onePossibleValue() == null);
+ const elem_llvm_ty = try dg.llvmType(elem_ty);
const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null);
- return elem_type.arrayType(@intCast(c_uint, total_len));
+ return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
},
.Vector => {
const elem_type = try dg.llvmType(t.childType());
@@ -978,7 +982,6 @@ pub const DeclGen = struct {
if (struct_obj.layout == .Packed) {
try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count() * 2);
- const target = dg.module.getTarget();
comptime assert(Type.packed_struct_layout_version == 1);
var offset: u64 = 0;
var big_align: u32 = 0;
@@ -1073,7 +1076,6 @@ pub const DeclGen = struct {
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
const union_obj = t.cast(Type.Payload.Union).?.data;
- const target = dg.module.getTarget();
if (t.unionTagType()) |enum_tag_ty| {
const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);
const layout = union_obj.getLayout(target, true);
@@ -1141,7 +1143,6 @@ pub const DeclGen = struct {
},
.Fn => {
const fn_info = t.fnInfo();
- const target = dg.module.getTarget();
const sret = firstParamSRet(fn_info, target);
const return_type = fn_info.return_type;
const raw_llvm_ret_ty = try dg.llvmType(return_type);
@@ -1257,16 +1258,21 @@ pub const DeclGen = struct {
},
.Float => {
const llvm_ty = try dg.llvmType(tv.ty);
- switch (tv.ty.floatBits(dg.module.getTarget())) {
+ const target = dg.module.getTarget();
+ switch (tv.ty.floatBits(target)) {
16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)),
80 => {
const float = tv.val.toFloat(f80);
- const repr = @ptrCast(*const std.math.F80Repr, &float);
+ const repr = std.math.break_f80(float);
const llvm_i80 = dg.context.intType(80);
var x = llvm_i80.constInt(repr.exp, .False);
x = x.constShl(llvm_i80.constInt(64, .False));
x = x.constOr(llvm_i80.constInt(repr.fraction, .False));
- return x.constBitCast(llvm_ty);
+ if (backendSupportsF80(target)) {
+ return x.constBitCast(llvm_ty);
+ } else {
+ return x;
+ }
},
128 => {
var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128));
@@ -5353,3 +5359,12 @@ fn isByRef(ty: Type) bool {
},
}
}
+
+/// This function returns true if we expect LLVM to lower x86_fp80 correctly
+/// and false if we expect LLVM to crash if it counters an x86_fp80 type.
+fn backendSupportsF80(target: std.Target) bool {
+ return switch (target.cpu.arch) {
+ .x86_64, .i386 => true,
+ else => false,
+ };
+}
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 7b0bcbe2f5..f1a94b9bb9 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -8195,17 +8195,15 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n
case 64:
return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f64);
case 80: {
- uint64_t buf[2];
- memcpy(&buf, &const_val->data.x_f80, 16);
-#if ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN
- uint64_t tmp = buf[0];
- buf[0] = buf[1];
- buf[1] = tmp;
-#endif
- LLVMValueRef as_i128 = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf);
- if (!target_has_f80(g->zig_target)) return as_i128;
- LLVMValueRef as_int = LLVMConstTrunc(as_i128, LLVMIntType(80));
- return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry));
+ LLVMTypeRef llvm_i80 = LLVMIntType(80);
+ LLVMValueRef x = LLVMConstInt(llvm_i80, const_val->data.x_f80.signExp, false);
+ x = LLVMConstShl(x, LLVMConstInt(llvm_i80, 64, false));
+ x = LLVMConstOr(x, LLVMConstInt(llvm_i80, const_val->data.x_f80.signif, false));
+ if (target_has_f80(g->zig_target)) {
+ return LLVMConstBitCast(x, LLVMX86FP80Type());
+ } else {
+ return x;
+ }
}
case 128:
{
@@ -9429,32 +9427,36 @@ static void define_builtin_types(CodeGen *g) {
{
ZigType *entry = new_type_table_entry(ZigTypeIdFloat);
- unsigned u64_alignment = LLVMABIAlignmentOfType(g->target_data_ref, LLVMInt64Type());
+ entry->size_in_bits = 80;
- if (u64_alignment >= 8) {
- entry->size_in_bits = 128;
- entry->abi_size = 16;
- entry->abi_align = 16;
- } else if (u64_alignment >= 4) {
- entry->size_in_bits = 96;
- entry->abi_size = 12;
- entry->abi_align = 4;
- } else {
- entry->size_in_bits = 80;
- entry->abi_size = 10;
- entry->abi_align = 2;
- }
- if (target_has_f80(g->zig_target)) {
- entry->llvm_type = LLVMX86FP80Type();
- } else {
- // We use an int here instead of x86_fp80 because on targets such as arm,
- // LLVM will give "ERROR: Cannot select" for any instructions involving
- // the x86_fp80 type.
- entry->llvm_type = get_int_type(g, false, entry->size_in_bits)->llvm_type;
- }
buf_init_from_str(&entry->name, "f80");
entry->data.floating.bit_count = 80;
+ switch (g->zig_target->arch) {
+ case ZigLLVM_x86_64:
+ entry->llvm_type = LLVMX86FP80Type();
+ entry->abi_size = 16;
+ entry->abi_align = 16;
+ break;
+ case ZigLLVM_x86:
+ entry->llvm_type = LLVMX86FP80Type();
+ entry->abi_size = 12;
+ entry->abi_align = 4;
+ break;
+ default: {
+ // We use an int here instead of x86_fp80 because on targets such as arm,
+ // LLVM will give "ERROR: Cannot select" for any instructions involving
+ // the x86_fp80 type.
+ ZigType *u80_ty = get_int_type(g, false, 80);
+ assert(!target_has_f80(g->zig_target));
+ assert(u80_ty->size_in_bits == entry->size_in_bits);
+ entry->llvm_type = get_llvm_type(g, u80_ty);
+ entry->abi_size = u80_ty->abi_size;
+ entry->abi_align = u80_ty->abi_align;
+ break;
+ }
+ }
+
entry->llvm_di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
entry->size_in_bits, ZigLLVMEncoding_DW_ATE_unsigned());
diff --git a/src/type.zig b/src/type.zig
index 0827b2e2d7..27fdb0abc8 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1877,9 +1877,28 @@ pub const Type = extern union {
.f16 => return 2,
.f32 => return 4,
.f64 => return 8,
- .f80 => return 16,
.f128 => return 16,
- .c_longdouble => return 16,
+
+ .f80 => switch (target.cpu.arch) {
+ .i386 => return 4,
+ .x86_64 => return 16,
+ else => {
+ var payload: Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = 80,
+ };
+ const u80_ty = initPayload(&payload.base);
+ return abiAlignment(u80_ty, target);
+ },
+ },
+ .c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
+ 16 => return abiAlignment(Type.f16, target),
+ 32 => return abiAlignment(Type.f32, target),
+ 64 => return abiAlignment(Type.f64, target),
+ 80 => return abiAlignment(Type.f80, target),
+ 128 => return abiAlignment(Type.f128, target),
+ else => unreachable,
+ },
.error_set,
.error_set_single,
@@ -2158,9 +2177,28 @@ pub const Type = extern union {
.f16 => return 2,
.f32 => return 4,
.f64 => return 8,
- .f80 => return 16,
.f128 => return 16,
- .c_longdouble => return 16,
+
+ .f80 => switch (target.cpu.arch) {
+ .i386 => return 12,
+ .x86_64 => return 16,
+ else => {
+ var payload: Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = 80,
+ };
+ const u80_ty = initPayload(&payload.base);
+ return abiSize(u80_ty, target);
+ },
+ },
+ .c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
+ 16 => return abiSize(Type.f16, target),
+ 32 => return abiSize(Type.f32, target),
+ 64 => return abiSize(Type.f64, target),
+ 80 => return abiSize(Type.f80, target),
+ 128 => return abiSize(Type.f128, target),
+ else => unreachable,
+ },
.error_set,
.error_set_single,
@@ -2349,7 +2387,7 @@ pub const Type = extern union {
.c_ulong => return CType.ulong.sizeInBits(target),
.c_longlong => return CType.longlong.sizeInBits(target),
.c_ulonglong => return CType.ulonglong.sizeInBits(target),
- .c_longdouble => 128,
+ .c_longdouble => return CType.longdouble.sizeInBits(target),
.error_set,
.error_set_single,
@@ -4772,6 +4810,13 @@ pub const Type = extern union {
pub const @"u8" = initTag(.u8);
pub const @"u32" = initTag(.u32);
pub const @"u64" = initTag(.u64);
+
+ pub const @"f16" = initTag(.f16);
+ pub const @"f32" = initTag(.f32);
+ pub const @"f64" = initTag(.f64);
+ pub const @"f80" = initTag(.f80);
+ pub const @"f128" = initTag(.f128);
+
pub const @"bool" = initTag(.bool);
pub const @"usize" = initTag(.usize);
pub const @"isize" = initTag(.isize);
diff --git a/src/value.zig b/src/value.zig
index aefb0a3e20..3479819160 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1112,6 +1112,19 @@ pub const Value = extern union {
}
fn floatWriteToMemory(comptime F: type, f: F, target: Target, buffer: []u8) void {
+ if (F == f80) {
+ switch (target.cpu.arch) {
+ .i386, .x86_64 => {
+ const repr = std.math.break_f80(f);
+ std.mem.writeIntLittle(u64, buffer[0..8], repr.fraction);
+ std.mem.writeIntLittle(u16, buffer[8..10], repr.exp);
+ // TODO set the rest of the bytes to undefined. should we use 0xaa
+ // or is there a different way?
+ return;
+ },
+ else => {},
+ }
+ }
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
@@ -1122,41 +1135,43 @@ pub const Value = extern union {
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
if (F == f80) {
- switch (target.cpu.arch.endian()) {
- .Little => {
- const TargetF80Repr = extern struct {
- fraction: u64,
- exp: u16,
- };
- const target_repr = @ptrCast(*align(1) const TargetF80Repr, buffer.ptr);
- const real_repr: std.math.F80Repr = .{
- .fraction = target_repr.fraction,
- .exp = target_repr.exp,
- };
- return @ptrCast(*const f80, &real_repr).*;
- },
- .Big => {
- const TargetF80Repr = extern struct {
- exp: u16,
- fraction: u64,
- };
- const target_repr = @ptrCast(*align(1) const TargetF80Repr, buffer.ptr);
- const real_repr: std.math.F80Repr = .{
- .fraction = target_repr.fraction,
- .exp = target_repr.exp,
- };
- return @ptrCast(*const f80, &real_repr).*;
- },
+ switch (target.cpu.arch) {
+ .i386, .x86_64 => return std.math.make_f80(.{
+ .fraction = std.mem.readIntLittle(u64, buffer[0..8]),
+ .exp = std.mem.readIntLittle(u16, buffer[8..10]),
+ }),
+ else => {},
}
}
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
} });
- const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
+ const int = readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
return @bitCast(F, int);
}
+ fn readInt(comptime Int: type, buffer: *const [@sizeOf(Int)]u8, endian: std.builtin.Endian) Int {
+ var result: Int = 0;
+ switch (endian) {
+ .Big => {
+ for (buffer) |byte| {
+ result <<= 8;
+ result |= byte;
+ }
+ },
+ .Little => {
+ var i: usize = buffer.len;
+ while (i != 0) {
+ i -= 1;
+ result <<= 8;
+ result |= buffer[i];
+ }
+ },
+ }
+ return result;
+ }
+
/// Asserts that the value is a float or an integer.
pub fn toFloat(val: Value, comptime T: type) T {
return switch (val.tag()) {
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index ed632c26c5..00f4cff6e5 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -5,7 +5,10 @@ const math = std.math;
const pi = std.math.pi;
const e = std.math.e;
const Vector = std.meta.Vector;
-const has_f80_rt = @import("builtin").cpu.arch == .x86_64;
+const has_f80_rt = switch (builtin.cpu.arch) {
+ .x86_64, .i386 => true,
+ else => false,
+};
const epsilon_16 = 0.001;
const epsilon = 0.000001;
From 1a8987fe7c0e8ee43eb8f3dcccb932b7b2afcee2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 22:19:44 -0700
Subject: [PATCH 0141/2031] CI: additionally test stage2 LLVM backend targeting
aarch64-linux
---
ci/zinc/linux_test.sh | 1 +
1 file changed, 1 insertion(+)
diff --git a/ci/zinc/linux_test.sh b/ci/zinc/linux_test.sh
index d9f42e6876..8f3eaacc7e 100755
--- a/ci/zinc/linux_test.sh
+++ b/ci/zinc/linux_test.sh
@@ -5,6 +5,7 @@
ZIG=$DEBUG_STAGING/bin/zig
$ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM
+$ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM -target aarch64-linux --test-cmd qemu-aarch64 --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -ofmt=c
$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin
From 335c680cde670776280c3a812adb64abd1311d97 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 22:28:53 -0700
Subject: [PATCH 0142/2031] LLVM backend: fix union with only 1 tag tripping
llvm assertion
---
src/codegen/llvm.zig | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index b2adb898d3..ec0ead94e4 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -835,6 +835,7 @@ pub const DeclGen = struct {
var buffer: Type.Payload.Bits = undefined;
const int_ty = t.intTagType(&buffer);
const bit_count = int_ty.intInfo(target).bits;
+ assert(bit_count != 0);
return dg.context.intType(bit_count);
},
.Float => switch (t.floatBits(target)) {
@@ -1077,10 +1078,10 @@ pub const DeclGen = struct {
const union_obj = t.cast(Type.Payload.Union).?.data;
if (t.unionTagType()) |enum_tag_ty| {
- const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);
const layout = union_obj.getLayout(target, true);
if (layout.payload_size == 0) {
+ const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);
gop.value_ptr.* = enum_tag_llvm_ty;
return enum_tag_llvm_ty;
}
@@ -1111,6 +1112,7 @@ pub const DeclGen = struct {
llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
return llvm_union_ty;
}
+ const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);
// Put the tag before or after the payload depending on which one's
// alignment is greater.
From d72f832b1ebab0db106e64bc9f59eba90c414311 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 23:03:13 -0700
Subject: [PATCH 0143/2031] LLVM backend: call constPtrToInt instead of
constBitCast
when appropriate. Avoids tripping an LLVM assertion.
---
src/codegen/llvm.zig | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index ec0ead94e4..85cb808996 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1858,7 +1858,11 @@ pub const DeclGen = struct {
try self.resolveGlobalDecl(decl);
const llvm_type = try self.llvmType(tv.ty);
- return llvm_val.constBitCast(llvm_type);
+ if (tv.ty.zigTypeTag() == .Int) {
+ return llvm_val.constPtrToInt(llvm_type);
+ } else {
+ return llvm_val.constBitCast(llvm_type);
+ }
}
fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*const llvm.Value {
From b92e1ab8ccf4e450467dfd5ee8ff9ea87c148c26 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 23:51:09 -0700
Subject: [PATCH 0144/2031] stage1: override f80 alignment for i386-windows
Comment reproduced here:
Note the following u64 alignments:
x86-linux: 4
x86-windows: 8
LLVM makes x86_fp80 have the following alignment and sizes regardless
of operating system:
x86_64: size=16, align=16
x86: size=12, align=4
However in Zig we override x86-windows to have size=16, align=16
in order for the property to hold that u80 and f80 have the same ABI size.
Fixes "error: destination type 'f80' has size 12 but source type 'u80'
has size 16" when trying to bitcast between f80 and u80 on i386-windows.
---
src/stage1/codegen.cpp | 47 ++++++++++++++++++++++++++----------------
1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index f1a94b9bb9..dc8f5e590e 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -9432,29 +9432,40 @@ static void define_builtin_types(CodeGen *g) {
buf_init_from_str(&entry->name, "f80");
entry->data.floating.bit_count = 80;
- switch (g->zig_target->arch) {
- case ZigLLVM_x86_64:
- entry->llvm_type = LLVMX86FP80Type();
+ if (target_has_f80(g->zig_target)) {
+ entry->llvm_type = LLVMX86FP80Type();
+
+ // Note the following u64 alignments:
+ // x86-linux: 4
+ // x86-windows: 8
+ // LLVM makes x86_fp80 have the following alignment and sizes regardless
+ // of operating system:
+ // x86_64: size=16, align=16
+ // x86: size=12, align=4
+ // However in Zig we override x86-windows to have size=16, align=16
+ // in order for the property to hold that u80 and f80 have the same ABI size.
+ unsigned u64_alignment = LLVMABIAlignmentOfType(g->target_data_ref, LLVMInt64Type());
+
+ if (u64_alignment >= 8) {
entry->abi_size = 16;
entry->abi_align = 16;
- break;
- case ZigLLVM_x86:
- entry->llvm_type = LLVMX86FP80Type();
+ } else if (u64_alignment >= 4) {
entry->abi_size = 12;
entry->abi_align = 4;
- break;
- default: {
- // We use an int here instead of x86_fp80 because on targets such as arm,
- // LLVM will give "ERROR: Cannot select" for any instructions involving
- // the x86_fp80 type.
- ZigType *u80_ty = get_int_type(g, false, 80);
- assert(!target_has_f80(g->zig_target));
- assert(u80_ty->size_in_bits == entry->size_in_bits);
- entry->llvm_type = get_llvm_type(g, u80_ty);
- entry->abi_size = u80_ty->abi_size;
- entry->abi_align = u80_ty->abi_align;
- break;
+ } else {
+ entry->abi_size = 10;
+ entry->abi_align = u64_alignment;
}
+ } else {
+ // We use an int here instead of x86_fp80 because on targets such as arm,
+ // LLVM will give "ERROR: Cannot select" for any instructions involving
+ // the x86_fp80 type.
+ ZigType *u80_ty = get_int_type(g, false, 80);
+ assert(!target_has_f80(g->zig_target));
+ assert(u80_ty->size_in_bits == entry->size_in_bits);
+ entry->llvm_type = get_llvm_type(g, u80_ty);
+ entry->abi_size = u80_ty->abi_size;
+ entry->abi_align = u80_ty->abi_align;
}
entry->llvm_di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
From ba31a9469f48065c9ebf935160faffd0f6c9bfdc Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Feb 2022 23:56:09 -0700
Subject: [PATCH 0145/2031] Sema: int casting to u0 returns const value
Also shift left with u0 rhs returns lhs even when lhs is runtime known.
---
src/Sema.zig | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 3b4f1c6f55..41c841e298 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5977,8 +5977,13 @@ fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.fail(block, src, "unable to cast runtime value to 'comptime_int'", .{});
}
- try sema.requireRuntimeBlock(block, operand_src);
// TODO insert safety check to make sure the value fits in the dest type
+
+ if ((try sema.typeHasOnePossibleValue(block, dest_ty_src, dest_ty))) |opv| {
+ return sema.addConstant(dest_ty, opv);
+ }
+
+ try sema.requireRuntimeBlock(block, operand_src);
return block.addTyOp(.intcast, dest_ty, operand);
}
@@ -7537,17 +7542,21 @@ fn zirShl(
const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs);
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.addConstUndef(sema.typeOf(lhs));
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return lhs;
+ }
+ }
+
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
const lhs_ty = sema.typeOf(lhs);
if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
const rhs_val = maybe_rhs_val orelse break :rs rhs_src;
- if (rhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
- // If rhs is 0, return lhs without doing any calculations.
- if (rhs_val.compareWithZero(.eq)) {
- return sema.addConstant(lhs_ty, lhs_val);
- }
const target = sema.mod.getTarget();
const val = switch (air_tag) {
.shl_exact => val: {
@@ -7577,12 +7586,7 @@ fn zirShl(
};
return sema.addConstant(lhs_ty, val);
- } else rs: {
- if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) return sema.addConstUndef(sema.typeOf(lhs));
- }
- break :rs lhs_src;
- };
+ } else lhs_src;
// TODO: insert runtime safety check for shl_exact
From 38236533f1d031d31c6d10cbe7d0f8c59a9e3520 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 11 Feb 2022 00:03:53 -0700
Subject: [PATCH 0146/2031] LLVM backend: avoid creating invalid LLVM types
Fixes assertions from creating i0 types which are not allowed in LLVM.
---
src/codegen/llvm.zig | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 85cb808996..3c1f0c9737 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -687,9 +687,6 @@ pub const DeclGen = struct {
const target = dg.module.getTarget();
const sret = firstParamSRet(fn_info, target);
- const return_type = fn_info.return_type;
- const raw_llvm_ret_ty = try dg.llvmType(return_type);
-
const fn_type = try dg.llvmType(zig_fn_type);
const fqn = try decl.getFullyQualifiedName(dg.gpa);
@@ -708,6 +705,8 @@ pub const DeclGen = struct {
if (sret) {
dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
dg.addArgAttr(llvm_fn, 0, "noalias");
+
+ const raw_llvm_ret_ty = try dg.llvmType(fn_info.return_type);
llvm_fn.addSretAttr(0, raw_llvm_ret_ty);
}
@@ -737,7 +736,7 @@ pub const DeclGen = struct {
// Function attributes that are independent of analysis results of the function body.
dg.addCommonFnAttributes(llvm_fn);
- if (return_type.isNoReturn()) {
+ if (fn_info.return_type.isNoReturn()) {
dg.addFnAttr(llvm_fn, "noreturn");
}
@@ -829,6 +828,7 @@ pub const DeclGen = struct {
.Void, .NoReturn => return dg.context.voidType(),
.Int => {
const info = t.intInfo(target);
+ assert(info.bits != 0);
return dg.context.intType(info.bits);
},
.Enum => {
@@ -1147,17 +1147,17 @@ pub const DeclGen = struct {
const fn_info = t.fnInfo();
const sret = firstParamSRet(fn_info, target);
const return_type = fn_info.return_type;
- const raw_llvm_ret_ty = try dg.llvmType(return_type);
- const llvm_ret_ty = if (!return_type.hasRuntimeBits() or sret)
- dg.context.voidType()
+ const llvm_sret_ty = if (return_type.hasRuntimeBits())
+ try dg.llvmType(return_type)
else
- raw_llvm_ret_ty;
+ dg.context.voidType();
+ const llvm_ret_ty = if (sret) dg.context.voidType() else llvm_sret_ty;
var llvm_params = std.ArrayList(*const llvm.Type).init(dg.gpa);
defer llvm_params.deinit();
if (sret) {
- try llvm_params.append(raw_llvm_ret_ty.pointerType(0));
+ try llvm_params.append(llvm_sret_ty.pointerType(0));
}
for (fn_info.param_types) |param_ty| {
@@ -1210,6 +1210,7 @@ pub const DeclGen = struct {
const bigint = tv.val.toBigInt(&bigint_space);
const target = dg.module.getTarget();
const int_info = tv.ty.intInfo(target);
+ assert(int_info.bits != 0);
const llvm_type = dg.context.intType(int_info.bits);
const unsigned_val = v: {
@@ -2241,7 +2242,6 @@ pub const FuncGen = struct {
};
const fn_info = zig_fn_ty.fnInfo();
const return_type = fn_info.return_type;
- const llvm_ret_ty = try self.dg.llvmType(return_type);
const llvm_fn = try self.resolveInst(pl_op.operand);
const target = self.dg.module.getTarget();
const sret = firstParamSRet(fn_info, target);
@@ -2250,6 +2250,7 @@ pub const FuncGen = struct {
defer llvm_args.deinit();
const ret_ptr = if (!sret) null else blk: {
+ const llvm_ret_ty = try self.dg.llvmType(return_type);
const ret_ptr = self.buildAlloca(llvm_ret_ty);
ret_ptr.setAlignment(return_type.abiAlignment(target));
try llvm_args.append(ret_ptr);
@@ -2284,6 +2285,7 @@ pub const FuncGen = struct {
} else if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBits()) {
return null;
} else if (sret) {
+ const llvm_ret_ty = try self.dg.llvmType(return_type);
call.setCallSret(llvm_ret_ty);
return ret_ptr;
} else {
From 91508e10abe4ab82e4f7a4dcdfac178ebd9e52d3 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 11 Feb 2022 00:33:51 -0700
Subject: [PATCH 0147/2031] LLVM backend: handle unnamed structs when lowering
array values
LLVM doesn't support lowering union values, so we have to use unnamed
structs to do it, which means any type that contains a union as an
element, even if it is nested in another type, has to have a mechanism
to detect when it can't be lowered normally and has to resort itself to
an unnamed struct.
This includes arrays.
---
src/codegen/llvm.zig | 66 ++++++++++++++++++++++++++++++++------------
1 file changed, 48 insertions(+), 18 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 3c1f0c9737..3b9bac09ec 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1371,14 +1371,24 @@ pub const DeclGen = struct {
const gpa = dg.gpa;
const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len);
defer gpa.free(llvm_elems);
+ var need_unnamed = false;
for (elem_vals) |elem_val, i| {
llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
+ }
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ .True,
+ );
+ } else {
+ const llvm_elem_ty = try dg.llvmType(elem_ty);
+ return llvm_elem_ty.constArray(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
}
- const llvm_elem_ty = try dg.llvmType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
},
.repeated => {
const val = tv.val.castTag(.repeated).?.data;
@@ -1389,25 +1399,46 @@ pub const DeclGen = struct {
const gpa = dg.gpa;
const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent);
defer gpa.free(llvm_elems);
- for (llvm_elems[0..len]) |*elem| {
- elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
+
+ var need_unnamed = false;
+ if (len != 0) {
+ for (llvm_elems[0..len]) |*elem| {
+ elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
+ }
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
}
+
if (sentinel) |sent| {
llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent });
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
+ }
+
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ .True,
+ );
+ } else {
+ const llvm_elem_ty = try dg.llvmType(elem_ty);
+ return llvm_elem_ty.constArray(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
}
- const llvm_elem_ty = try dg.llvmType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
},
.empty_array_sentinel => {
const elem_ty = tv.ty.elemType();
const sent_val = tv.ty.sentinel().?;
const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val });
const llvm_elems: [1]*const llvm.Value = .{sentinel};
- const llvm_elem_ty = try dg.llvmType(elem_ty);
- return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
+ const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
+ if (need_unnamed) {
+ return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True);
+ } else {
+ const llvm_elem_ty = try dg.llvmType(elem_ty);
+ return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
+ }
},
else => unreachable,
},
@@ -1495,7 +1526,7 @@ pub const DeclGen = struct {
var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count);
defer llvm_fields.deinit(gpa);
- var make_unnamed_struct = false;
+ var need_unnamed = false;
const struct_obj = tv.ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
const target = dg.module.getTarget();
@@ -1596,14 +1627,13 @@ pub const DeclGen = struct {
.val = field_val,
});
- make_unnamed_struct = make_unnamed_struct or
- dg.isUnnamedType(field_ty, field_llvm_val);
+ need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val);
llvm_fields.appendAssumeCapacity(field_llvm_val);
}
}
- if (make_unnamed_struct) {
+ if (need_unnamed) {
return dg.context.constStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
From 774f9bdb79cc1047b4b6f86c6030817810466151 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 11 Feb 2022 01:07:24 -0700
Subject: [PATCH 0148/2031] LLVM backend: disable failing aarch64 behavior test
---
test/behavior/math.zig | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index c23e8ebe3e..7cf7ca4c5d 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -126,6 +126,13 @@ fn testOneCtz(comptime T: type, x: T) u32 {
}
test "@ctz vectors" {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
+ // TODO this is tripping an LLVM assert:
+ // zig: /home/andy/Downloads/llvm-project-13/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp:198: llvm::LegalizeActionStep llvm::LegalizeRuleSet::apply(const llvm::LegalityQuery&) const: Assertion `mutationIsSane(Rule, Query, Mutation) && "legality mutation invalid for match"' failed.
+ // I need to report a zig issue and an llvm issue
+ return error.SkipZigTest;
+ }
+
try testCtzVectors();
comptime try testCtzVectors();
}
From 16076964d6e7c8f17117614d5a7d83070d5b8902 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 11 Feb 2022 01:27:00 -0700
Subject: [PATCH 0149/2031] disable NaN f80 behavior tests
Let's get all these other bug fixes in and figure out f80 NaN later.
Looks like it's not working at comptime in master branch anyway.
---
test/behavior/math.zig | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 7cf7ca4c5d..a9000353b8 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -988,12 +988,14 @@ test "NaN comparison" {
try testNanEqNan(f32);
try testNanEqNan(f64);
try testNanEqNan(f128);
- if (has_f80_rt and (builtin.zig_backend == .stage1)) try testNanEqNan(f80); // TODO
comptime try testNanEqNan(f16);
comptime try testNanEqNan(f32);
comptime try testNanEqNan(f64);
comptime try testNanEqNan(f128);
- // comptime try testNanEqNan(f80); // TODO
+
+ // TODO make this pass on all targets
+ // try testNanEqNan(f80);
+ // comptime try testNanEqNan(f80);
}
fn testNanEqNan(comptime F: type) !void {
From d542a588c625b737f1680848c9862bd83c30a7c5 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 12 Feb 2022 19:09:58 +0100
Subject: [PATCH 0150/2031] Skip @sqrt f80 test on freebsd
---
test/behavior/floatop.zig | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index 00f4cff6e5..4de8f5a5fc 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -100,8 +100,11 @@ fn testSqrt() !void {
if (builtin.zig_backend == .stage1) {
if (has_f80_rt) {
- var a: f80 = 25;
- try expect(@sqrt(a) == 5);
+ // TODO https://github.com/ziglang/zig/issues/10875
+ if (builtin.os.tag != .freebsd) {
+ var a: f80 = 25;
+ try expect(@sqrt(a) == 5);
+ }
}
{
const a: comptime_float = 25.0;
From 16ec848d2ab5702ad3794d30ed5d776b5abb60ce Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 12 Feb 2022 18:54:08 +0100
Subject: [PATCH 0151/2031] macho: put linker symlink for cache invalidation in
zig-cache
Due to differences in where the output gets emitted in stage1 and stage2,
we were putting the symlink next to the binary rather than in `zig-cache`
directory when building with stage2.
---
src/link/MachO.zig | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 4aa627ca39..b8e2ae0840 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -507,6 +507,15 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
const allow_undef = is_dyn_lib and (self.base.options.allow_shlib_undefined orelse false);
const id_symlink_basename = "zld.id";
+ const cache_dir_handle = blk: {
+ if (use_stage1) {
+ break :blk directory.handle;
+ }
+ if (self.base.options.module) |module| {
+ break :blk module.zig_cache_artifact_directory.handle;
+ }
+ break :blk directory.handle;
+ };
var man: Cache.Manifest = undefined;
defer if (!self.base.options.disable_lld_caching) man.deinit();
@@ -552,7 +561,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = Cache.readSmallFile(
- directory.handle,
+ cache_dir_handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
@@ -588,7 +597,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
});
// We are about to change the output file to be different, so we invalidate the build hash now.
- directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
+ cache_dir_handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
};
@@ -621,7 +630,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
} else {
if (use_stage1) {
const sub_path = self.base.options.emit.?.sub_path;
- self.base.file = try directory.handle.createFile(sub_path, .{
+ self.base.file = try cache_dir_handle.createFile(sub_path, .{
.truncate = true,
.read = true,
.mode = link.determineMode(self.base.options),
@@ -1080,7 +1089,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
if (use_stage1 and self.base.options.disable_lld_caching) break :cache;
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
- Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
+ Cache.writeSmallFile(cache_dir_handle, id_symlink_basename, &digest) catch |err| {
log.debug("failed to save linking hash digest file: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
From a005ac9d3c884c6254074ec150fe536881fe31b5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 12 Feb 2022 20:44:30 -0700
Subject: [PATCH 0152/2031] stage2: implement `@popCount` for SIMD vectors
---
src/Sema.zig | 36 +++----------------------
src/codegen/llvm.zig | 15 +++++++----
src/value.zig | 45 ++++++++++++++++++-------------
test/behavior.zig | 1 -
test/behavior/popcount.zig | 21 ++++++++++++++-
test/behavior/popcount_stage1.zig | 24 -----------------
6 files changed, 61 insertions(+), 81 deletions(-)
delete mode 100644 test/behavior/popcount_stage1.zig
diff --git a/src/Sema.zig b/src/Sema.zig
index 41c841e298..7dbc36af37 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -720,7 +720,6 @@ fn analyzeBodyInner(
.align_cast => try sema.zirAlignCast(block, inst),
.has_decl => try sema.zirHasDecl(block, inst),
.has_field => try sema.zirHasField(block, inst),
- .pop_count => try sema.zirPopCount(block, inst),
.byte_swap => try sema.zirByteSwap(block, inst),
.bit_reverse => try sema.zirBitReverse(block, inst),
.bit_offset_of => try sema.zirBitOffsetOf(block, inst),
@@ -743,8 +742,9 @@ fn analyzeBodyInner(
.await_nosuspend => try sema.zirAwait(block, inst, true),
.extended => try sema.zirExtended(block, inst),
- .clz => try sema.zirClzCtz(block, inst, .clz, Value.clz),
- .ctz => try sema.zirClzCtz(block, inst, .ctz, Value.ctz),
+ .clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
+ .ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
+ .pop_count => try sema.zirBitCount(block, inst, .popcount, Value.popCount),
.sqrt => try sema.zirUnaryMath(block, inst, .sqrt, Value.sqrt),
.sin => try sema.zirUnaryMath(block, inst, .sin, Value.sin),
@@ -11487,7 +11487,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
return sema.coerceCompatiblePtrs(block, dest_ty, ptr, ptr_src);
}
-fn zirClzCtz(
+fn zirBitCount(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
@@ -11550,34 +11550,6 @@ fn zirClzCtz(
}
}
-fn zirPopCount(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
- const operand = sema.resolveInst(inst_data.operand);
- const operand_ty = sema.typeOf(operand);
- // TODO implement support for vectors
- if (operand_ty.zigTypeTag() != .Int) {
- return sema.fail(block, ty_src, "expected integer type, found '{}'", .{
- operand_ty,
- });
- }
- const target = sema.mod.getTarget();
- const bits = operand_ty.intInfo(target).bits;
- if (bits == 0) return Air.Inst.Ref.zero;
-
- const result_ty = try Type.smallestUnsignedInt(sema.arena, bits);
-
- const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_ty);
- const result_val = try val.popCount(operand_ty, target, sema.arena);
- return sema.addConstant(result_ty, result_val);
- } else operand_src;
-
- try sema.requireRuntimeBlock(block, runtime_src);
- return block.addTyOp(.popcount, result_ty, operand);
-}
-
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 3b9bac09ec..493b895d5d 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2205,7 +2205,7 @@ pub const FuncGen = struct {
.get_union_tag => try self.airGetUnionTag(inst),
.clz => try self.airClzCtz(inst, "ctlz"),
.ctz => try self.airClzCtz(inst, "cttz"),
- .popcount => try self.airPopCount(inst, "ctpop"),
+ .popcount => try self.airPopCount(inst),
.tag_name => try self.airTagName(inst),
.error_name => try self.airErrorName(inst),
.splat => try self.airSplat(inst),
@@ -4364,7 +4364,7 @@ pub const FuncGen = struct {
}
}
- fn airPopCount(self: *FuncGen, inst: Air.Inst.Index, prefix: [*:0]const u8) !?*const llvm.Value {
+ fn airPopCount(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -4372,11 +4372,16 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const target = self.dg.module.getTarget();
const bits = operand_ty.intInfo(target).bits;
+ const vec_len: ?u32 = switch (operand_ty.zigTypeTag()) {
+ .Vector => operand_ty.vectorLen(),
+ else => null,
+ };
var fn_name_buf: [100]u8 = undefined;
- const llvm_fn_name = std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.i{d}", .{
- prefix, bits,
- }) catch unreachable;
+ const llvm_fn_name = if (vec_len) |len|
+ std.fmt.bufPrintZ(&fn_name_buf, "llvm.ctpop.v{d}i{d}", .{ len, bits }) catch unreachable
+ else
+ std.fmt.bufPrintZ(&fn_name_buf, "llvm.ctpop.i{d}", .{bits}) catch unreachable;
const fn_val = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
const operand_llvm_ty = try self.dg.llvmType(operand_ty);
const param_types = [_]*const llvm.Type{operand_llvm_ty};
diff --git a/src/value.zig b/src/value.zig
index 3479819160..1f93a828aa 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1303,6 +1303,33 @@ pub const Value = extern union {
}
}
+ pub fn popCount(val: Value, ty: Type, target: Target) u64 {
+ assert(!val.isUndef());
+ switch (val.tag()) {
+ .zero, .bool_false => return 0,
+ .one, .bool_true => return 1,
+
+ .int_u64 => return @popCount(u64, val.castTag(.int_u64).?.data),
+
+ else => {
+ const info = ty.intInfo(target);
+
+ var buffer: Value.BigIntSpace = undefined;
+ const operand_bigint = val.toBigInt(&buffer);
+
+ var limbs_buffer: [4]std.math.big.Limb = undefined;
+ var result_bigint = BigIntMutable{
+ .limbs = &limbs_buffer,
+ .positive = undefined,
+ .len = undefined,
+ };
+ result_bigint.popCount(operand_bigint, info.bits);
+
+ return result_bigint.toConst().to(u64) catch unreachable;
+ },
+ }
+ }
+
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to represent stored in twos complement form.
pub fn intBitCountTwosComp(self: Value, target: Target) usize {
@@ -1340,24 +1367,6 @@ pub const Value = extern union {
}
}
- pub fn popCount(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
- assert(!val.isUndef());
-
- const info = ty.intInfo(target);
-
- var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer);
-
- const limbs = try arena.alloc(
- std.math.big.Limb,
- std.math.big.int.calcTwosCompLimbCount(info.bits),
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.popCount(operand_bigint, info.bits);
-
- return fromBigInt(arena, result_bigint.toConst());
- }
-
/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
pub fn intFitsInType(self: Value, ty: Type, target: Target) bool {
switch (self.tag()) {
diff --git a/test/behavior.zig b/test/behavior.zig
index 86e48f1797..8188f2baaa 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -153,7 +153,6 @@ test {
_ = @import("behavior/ir_block_deps.zig");
_ = @import("behavior/misc.zig");
_ = @import("behavior/muladd.zig");
- _ = @import("behavior/popcount_stage1.zig");
_ = @import("behavior/reflection.zig");
_ = @import("behavior/select.zig");
_ = @import("behavior/shuffle.zig");
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index eb3e378058..d906c1bcc0 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -1,7 +1,6 @@
const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
-const Vector = std.meta.Vector;
test "@popCount integers" {
comptime try testPopCountIntegers();
@@ -44,3 +43,23 @@ fn testPopCountIntegers() !void {
try expect(@popCount(i128, @as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
}
}
+
+test "@popCount vectors" {
+ comptime try testPopCountVectors();
+ try testPopCountVectors();
+}
+
+fn testPopCountVectors() !void {
+ {
+ var x: @Vector(8, u32) = [1]u32{0xffffffff} ** 8;
+ const expected = [1]u6{32} ** 8;
+ const result: [8]u6 = @popCount(u32, x);
+ try expect(std.mem.eql(u6, &expected, &result));
+ }
+ {
+ var x: @Vector(8, i16) = [1]i16{-1} ** 8;
+ const expected = [1]u5{16} ** 8;
+ const result: [8]u5 = @popCount(i16, x);
+ try expect(std.mem.eql(u5, &expected, &result));
+ }
+}
diff --git a/test/behavior/popcount_stage1.zig b/test/behavior/popcount_stage1.zig
deleted file mode 100644
index 3783fdfe2f..0000000000
--- a/test/behavior/popcount_stage1.zig
+++ /dev/null
@@ -1,24 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const expectEqual = std.testing.expectEqual;
-const Vector = std.meta.Vector;
-
-test "@popCount vectors" {
- comptime try testPopCountVectors();
- try testPopCountVectors();
-}
-
-fn testPopCountVectors() !void {
- {
- var x: Vector(8, u32) = [1]u32{0xffffffff} ** 8;
- const expected = [1]u6{32} ** 8;
- const result: [8]u6 = @popCount(u32, x);
- try expect(std.mem.eql(u6, &expected, &result));
- }
- {
- var x: Vector(8, i16) = [1]i16{-1} ** 8;
- const expected = [1]u5{16} ** 8;
- const result: [8]u5 = @popCount(i16, x);
- try expect(std.mem.eql(u5, &expected, &result));
- }
-}
From c349191b75811f8a21e26f8b175483449fae1638 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 12 Feb 2022 21:13:07 -0700
Subject: [PATCH 0153/2031] organize behavior tests
moving towards disabling failing tests on an individual basis
---
test/behavior.zig | 7 -
test/behavior/array.zig | 350 ++++++++
test/behavior/array_llvm.zig | 315 --------
test/behavior/cast.zig | 340 +++++++-
test/behavior/cast_llvm.zig | 299 -------
test/behavior/error.zig | 26 +
test/behavior/error_llvm.zig | 24 -
test/behavior/sizeof_and_typeof.zig | 113 +++
test/behavior/sizeof_and_typeof_stage1.zig | 105 ---
test/behavior/struct.zig | 894 ++++++++++++++++++++-
test/behavior/struct_llvm.zig | 802 ------------------
test/behavior/truncate.zig | 13 +
test/behavior/truncate_stage1.zig | 13 -
test/behavior/type.zig | 386 +++++++++
test/behavior/type_stage1.zig | 362 ---------
15 files changed, 2111 insertions(+), 1938 deletions(-)
delete mode 100644 test/behavior/array_llvm.zig
delete mode 100644 test/behavior/cast_llvm.zig
delete mode 100644 test/behavior/error_llvm.zig
delete mode 100644 test/behavior/sizeof_and_typeof_stage1.zig
delete mode 100644 test/behavior/struct_llvm.zig
delete mode 100644 test/behavior/truncate_stage1.zig
delete mode 100644 test/behavior/type_stage1.zig
diff --git a/test/behavior.zig b/test/behavior.zig
index 8188f2baaa..404ce376a2 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -46,7 +46,6 @@ test {
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
- _ = @import("behavior/array_llvm.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bugs/624.zig");
_ = @import("behavior/bugs/704.zig");
@@ -61,7 +60,6 @@ test {
_ = @import("behavior/bugs/4954.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig");
- _ = @import("behavior/cast_llvm.zig");
_ = @import("behavior/defer.zig");
_ = @import("behavior/enum.zig");
_ = @import("behavior/error.zig");
@@ -98,7 +96,6 @@ test {
// Tests that pass for stage1 and the llvm backend.
_ = @import("behavior/atomics.zig");
_ = @import("behavior/bugs/9584.zig");
- _ = @import("behavior/error_llvm.zig");
_ = @import("behavior/eval.zig");
_ = @import("behavior/floatop.zig");
_ = @import("behavior/math.zig");
@@ -107,7 +104,6 @@ test {
_ = @import("behavior/popcount.zig");
_ = @import("behavior/saturating_arithmetic.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
- _ = @import("behavior/struct_llvm.zig");
_ = @import("behavior/switch.zig");
_ = @import("behavior/widening.zig");
@@ -156,14 +152,11 @@ test {
_ = @import("behavior/reflection.zig");
_ = @import("behavior/select.zig");
_ = @import("behavior/shuffle.zig");
- _ = @import("behavior/sizeof_and_typeof_stage1.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
_ = @import("behavior/switch_prong_err_enum.zig");
_ = @import("behavior/switch_prong_implicit_cast.zig");
- _ = @import("behavior/truncate_stage1.zig");
_ = @import("behavior/tuple.zig");
- _ = @import("behavior/type_stage1.zig");
_ = @import("behavior/typename.zig");
_ = @import("behavior/union_with_members.zig");
_ = @import("behavior/var_args.zig");
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index 0450d0781b..23820e71b5 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -222,3 +222,353 @@ test "anonymous list literal syntax" {
try S.doTheTest();
comptime try S.doTheTest();
}
+
+var s_array: [8]Sub = undefined;
+const Sub = struct { b: u8 };
+const Str = struct { a: []Sub };
+test "set global var array via slice embedded in struct" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ var s = Str{ .a = s_array[0..] };
+
+ s.a[0].b = 1;
+ s.a[1].b = 2;
+ s.a[2].b = 3;
+
+ try expect(s_array[0].b == 1);
+ try expect(s_array[1].b == 2);
+ try expect(s_array[2].b == 3);
+}
+
+test "read/write through global variable array of struct fields initialized via array mult" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ try expect(storage[0].term == 1);
+ storage[0] = MyStruct{ .term = 123 };
+ try expect(storage[0].term == 123);
+ }
+
+ pub const MyStruct = struct {
+ term: usize,
+ };
+
+ var storage: [1]MyStruct = [_]MyStruct{MyStruct{ .term = 1 }} ** 1;
+ };
+ try S.doTheTest();
+}
+
+test "implicit cast single-item pointer" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ try testImplicitCastSingleItemPtr();
+ comptime try testImplicitCastSingleItemPtr();
+}
+
+fn testImplicitCastSingleItemPtr() !void {
+ var byte: u8 = 100;
+ const slice = @as(*[1]u8, &byte)[0..];
+ slice[0] += 1;
+ try expect(byte == 101);
+}
+
+fn testArrayByValAtComptime(b: [2]u8) u8 {
+ return b[0];
+}
+
+test "comptime evaluating function that takes array by value" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const arr = [_]u8{ 1, 2 };
+ const x = comptime testArrayByValAtComptime(arr);
+ const y = comptime testArrayByValAtComptime(arr);
+ try expect(x == 1);
+ try expect(y == 1);
+}
+
+test "runtime initialize array elem and then implicit cast to slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ var two: i32 = 2;
+ const x: []const i32 = &[_]i32{two};
+ try expect(x[0] == 2);
+}
+
+test "array literal as argument to function" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn entry(two: i32) !void {
+ try foo(&[_]i32{ 1, 2, 3 });
+ try foo(&[_]i32{ 1, two, 3 });
+ try foo2(true, &[_]i32{ 1, 2, 3 });
+ try foo2(true, &[_]i32{ 1, two, 3 });
+ }
+ fn foo(x: []const i32) !void {
+ try expect(x[0] == 1);
+ try expect(x[1] == 2);
+ try expect(x[2] == 3);
+ }
+ fn foo2(trash: bool, x: []const i32) !void {
+ try expect(trash);
+ try expect(x[0] == 1);
+ try expect(x[1] == 2);
+ try expect(x[2] == 3);
+ }
+ };
+ try S.entry(2);
+ comptime try S.entry(2);
+}
+
+test "double nested array to const slice cast in array literal" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn entry(two: i32) !void {
+ const cases = [_][]const []const i32{
+ &[_][]const i32{&[_]i32{1}},
+ &[_][]const i32{&[_]i32{ 2, 3 }},
+ &[_][]const i32{
+ &[_]i32{4},
+ &[_]i32{ 5, 6, 7 },
+ },
+ };
+ try check(&cases);
+
+ const cases2 = [_][]const i32{
+ &[_]i32{1},
+ &[_]i32{ two, 3 },
+ };
+ try expect(cases2.len == 2);
+ try expect(cases2[0].len == 1);
+ try expect(cases2[0][0] == 1);
+ try expect(cases2[1].len == 2);
+ try expect(cases2[1][0] == 2);
+ try expect(cases2[1][1] == 3);
+
+ const cases3 = [_][]const []const i32{
+ &[_][]const i32{&[_]i32{1}},
+ &[_][]const i32{&[_]i32{ two, 3 }},
+ &[_][]const i32{
+ &[_]i32{4},
+ &[_]i32{ 5, 6, 7 },
+ },
+ };
+ try check(&cases3);
+ }
+
+ fn check(cases: []const []const []const i32) !void {
+ try expect(cases.len == 3);
+ try expect(cases[0].len == 1);
+ try expect(cases[0][0].len == 1);
+ try expect(cases[0][0][0] == 1);
+ try expect(cases[1].len == 1);
+ try expect(cases[1][0].len == 2);
+ try expect(cases[1][0][0] == 2);
+ try expect(cases[1][0][1] == 3);
+ try expect(cases[2].len == 2);
+ try expect(cases[2][0].len == 1);
+ try expect(cases[2][0][0] == 4);
+ try expect(cases[2][1].len == 3);
+ try expect(cases[2][1][0] == 5);
+ try expect(cases[2][1][1] == 6);
+ try expect(cases[2][1][2] == 7);
+ }
+ };
+ try S.entry(2);
+ comptime try S.entry(2);
+}
+
+test "anonymous literal in array" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Foo = struct {
+ a: usize = 2,
+ b: usize = 4,
+ };
+ fn doTheTest() !void {
+ var array: [2]Foo = .{
+ .{ .a = 3 },
+ .{ .b = 3 },
+ };
+ try expect(array[0].a == 3);
+ try expect(array[0].b == 4);
+ try expect(array[1].a == 2);
+ try expect(array[1].b == 3);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "access the null element of a null terminated array" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var array: [4:0]u8 = .{ 'a', 'o', 'e', 'u' };
+ try expect(array[4] == 0);
+ var len: usize = 4;
+ try expect(array[len] == 0);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "type deduction for array subscript expression" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var array = [_]u8{ 0x55, 0xAA };
+ var v0 = true;
+ try expect(@as(u8, 0xAA) == array[if (v0) 1 else 0]);
+ var v1 = false;
+ try expect(@as(u8, 0x55) == array[if (v1) 1 else 0]);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "sentinel element count towards the ABI size calculation" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ const T = packed struct {
+ fill_pre: u8 = 0x55,
+ data: [0:0]u8 = undefined,
+ fill_post: u8 = 0xAA,
+ };
+ var x = T{};
+ var as_slice = mem.asBytes(&x);
+ try expect(@as(usize, 3) == as_slice.len);
+ try expect(@as(u8, 0x55) == as_slice[0]);
+ try expect(@as(u8, 0xAA) == as_slice[2]);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "zero-sized array with recursive type definition" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const U = struct {
+ fn foo(comptime T: type, comptime n: usize) type {
+ return struct {
+ s: [n]T,
+ x: usize = n,
+ };
+ }
+ };
+
+ const S = struct {
+ list: U.foo(@This(), 0),
+ };
+
+ var t: S = .{ .list = .{ .s = undefined } };
+ try expect(@as(usize, 0) == t.list.x);
+}
+
+test "type coercion of anon struct literal to array" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const U = union {
+ a: u32,
+ b: bool,
+ c: []const u8,
+ };
+
+ fn doTheTest() !void {
+ var x1: u8 = 42;
+ const t1 = .{ x1, 56, 54 };
+ var arr1: [3]u8 = t1;
+ try expect(arr1[0] == 42);
+ try expect(arr1[1] == 56);
+ try expect(arr1[2] == 54);
+
+ if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ var x2: U = .{ .a = 42 };
+ const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } };
+ var arr2: [3]U = t2;
+ try expect(arr2[0].a == 42);
+ try expect(arr2[1].b == true);
+ try expect(mem.eql(u8, arr2[2].c, "hello"));
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "type coercion of pointer to anon struct literal to pointer to array" {
+ if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const U = union {
+ a: u32,
+ b: bool,
+ c: []const u8,
+ };
+
+ fn doTheTest() !void {
+ var x1: u8 = 42;
+ const t1 = &.{ x1, 56, 54 };
+ var arr1: *const [3]u8 = t1;
+ try expect(arr1[0] == 42);
+ try expect(arr1[1] == 56);
+ try expect(arr1[2] == 54);
+
+ var x2: U = .{ .a = 42 };
+ const t2 = &.{ x2, .{ .b = true }, .{ .c = "hello" } };
+ var arr2: *const [3]U = t2;
+ try expect(arr2[0].a == 42);
+ try expect(arr2[1].b == true);
+ try expect(mem.eql(u8, arr2[2].c, "hello"));
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
diff --git a/test/behavior/array_llvm.zig b/test/behavior/array_llvm.zig
deleted file mode 100644
index c3df5ba837..0000000000
--- a/test/behavior/array_llvm.zig
+++ /dev/null
@@ -1,315 +0,0 @@
-const std = @import("std");
-const testing = std.testing;
-const expect = testing.expect;
-const mem = std.mem;
-
-var s_array: [8]Sub = undefined;
-const Sub = struct { b: u8 };
-const Str = struct { a: []Sub };
-test "set global var array via slice embedded in struct" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- var s = Str{ .a = s_array[0..] };
-
- s.a[0].b = 1;
- s.a[1].b = 2;
- s.a[2].b = 3;
-
- try expect(s_array[0].b == 1);
- try expect(s_array[1].b == 2);
- try expect(s_array[2].b == 3);
-}
-
-test "read/write through global variable array of struct fields initialized via array mult" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- fn doTheTest() !void {
- try expect(storage[0].term == 1);
- storage[0] = MyStruct{ .term = 123 };
- try expect(storage[0].term == 123);
- }
-
- pub const MyStruct = struct {
- term: usize,
- };
-
- var storage: [1]MyStruct = [_]MyStruct{MyStruct{ .term = 1 }} ** 1;
- };
- try S.doTheTest();
-}
-
-test "implicit cast single-item pointer" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- try testImplicitCastSingleItemPtr();
- comptime try testImplicitCastSingleItemPtr();
-}
-
-fn testImplicitCastSingleItemPtr() !void {
- var byte: u8 = 100;
- const slice = @as(*[1]u8, &byte)[0..];
- slice[0] += 1;
- try expect(byte == 101);
-}
-
-fn testArrayByValAtComptime(b: [2]u8) u8 {
- return b[0];
-}
-
-test "comptime evaluating function that takes array by value" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const arr = [_]u8{ 1, 2 };
- const x = comptime testArrayByValAtComptime(arr);
- const y = comptime testArrayByValAtComptime(arr);
- try expect(x == 1);
- try expect(y == 1);
-}
-
-test "runtime initialize array elem and then implicit cast to slice" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- var two: i32 = 2;
- const x: []const i32 = &[_]i32{two};
- try expect(x[0] == 2);
-}
-
-test "array literal as argument to function" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- fn entry(two: i32) !void {
- try foo(&[_]i32{ 1, 2, 3 });
- try foo(&[_]i32{ 1, two, 3 });
- try foo2(true, &[_]i32{ 1, 2, 3 });
- try foo2(true, &[_]i32{ 1, two, 3 });
- }
- fn foo(x: []const i32) !void {
- try expect(x[0] == 1);
- try expect(x[1] == 2);
- try expect(x[2] == 3);
- }
- fn foo2(trash: bool, x: []const i32) !void {
- try expect(trash);
- try expect(x[0] == 1);
- try expect(x[1] == 2);
- try expect(x[2] == 3);
- }
- };
- try S.entry(2);
- comptime try S.entry(2);
-}
-
-test "double nested array to const slice cast in array literal" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- fn entry(two: i32) !void {
- const cases = [_][]const []const i32{
- &[_][]const i32{&[_]i32{1}},
- &[_][]const i32{&[_]i32{ 2, 3 }},
- &[_][]const i32{
- &[_]i32{4},
- &[_]i32{ 5, 6, 7 },
- },
- };
- try check(&cases);
-
- const cases2 = [_][]const i32{
- &[_]i32{1},
- &[_]i32{ two, 3 },
- };
- try expect(cases2.len == 2);
- try expect(cases2[0].len == 1);
- try expect(cases2[0][0] == 1);
- try expect(cases2[1].len == 2);
- try expect(cases2[1][0] == 2);
- try expect(cases2[1][1] == 3);
-
- const cases3 = [_][]const []const i32{
- &[_][]const i32{&[_]i32{1}},
- &[_][]const i32{&[_]i32{ two, 3 }},
- &[_][]const i32{
- &[_]i32{4},
- &[_]i32{ 5, 6, 7 },
- },
- };
- try check(&cases3);
- }
-
- fn check(cases: []const []const []const i32) !void {
- try expect(cases.len == 3);
- try expect(cases[0].len == 1);
- try expect(cases[0][0].len == 1);
- try expect(cases[0][0][0] == 1);
- try expect(cases[1].len == 1);
- try expect(cases[1][0].len == 2);
- try expect(cases[1][0][0] == 2);
- try expect(cases[1][0][1] == 3);
- try expect(cases[2].len == 2);
- try expect(cases[2][0].len == 1);
- try expect(cases[2][0][0] == 4);
- try expect(cases[2][1].len == 3);
- try expect(cases[2][1][0] == 5);
- try expect(cases[2][1][1] == 6);
- try expect(cases[2][1][2] == 7);
- }
- };
- try S.entry(2);
- comptime try S.entry(2);
-}
-
-test "anonymous literal in array" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- const Foo = struct {
- a: usize = 2,
- b: usize = 4,
- };
- fn doTheTest() !void {
- var array: [2]Foo = .{
- .{ .a = 3 },
- .{ .b = 3 },
- };
- try expect(array[0].a == 3);
- try expect(array[0].b == 4);
- try expect(array[1].a == 2);
- try expect(array[1].b == 3);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "access the null element of a null terminated array" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- fn doTheTest() !void {
- var array: [4:0]u8 = .{ 'a', 'o', 'e', 'u' };
- try expect(array[4] == 0);
- var len: usize = 4;
- try expect(array[len] == 0);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "type deduction for array subscript expression" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- fn doTheTest() !void {
- var array = [_]u8{ 0x55, 0xAA };
- var v0 = true;
- try expect(@as(u8, 0xAA) == array[if (v0) 1 else 0]);
- var v1 = false;
- try expect(@as(u8, 0x55) == array[if (v1) 1 else 0]);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "sentinel element count towards the ABI size calculation" {
- if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
- if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- const S = struct {
- fn doTheTest() !void {
- const T = packed struct {
- fill_pre: u8 = 0x55,
- data: [0:0]u8 = undefined,
- fill_post: u8 = 0xAA,
- };
- var x = T{};
- var as_slice = mem.asBytes(&x);
- try expect(@as(usize, 3) == as_slice.len);
- try expect(@as(u8, 0x55) == as_slice[0]);
- try expect(@as(u8, 0xAA) == as_slice[2]);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "zero-sized array with recursive type definition" {
- if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
- if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- const U = struct {
- fn foo(comptime T: type, comptime n: usize) type {
- return struct {
- s: [n]T,
- x: usize = n,
- };
- }
- };
-
- const S = struct {
- list: U.foo(@This(), 0),
- };
-
- var t: S = .{ .list = .{ .s = undefined } };
- try expect(@as(usize, 0) == t.list.x);
-}
-
-test "type coercion of anon struct literal to array" {
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- const U = union {
- a: u32,
- b: bool,
- c: []const u8,
- };
-
- fn doTheTest() !void {
- var x1: u8 = 42;
- const t1 = .{ x1, 56, 54 };
- var arr1: [3]u8 = t1;
- try expect(arr1[0] == 42);
- try expect(arr1[1] == 56);
- try expect(arr1[2] == 54);
-
- if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
- if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-
- var x2: U = .{ .a = 42 };
- const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } };
- var arr2: [3]U = t2;
- try expect(arr2[0].a == 42);
- try expect(arr2[1].b == true);
- try expect(mem.eql(u8, arr2[2].c, "hello"));
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "type coercion of pointer to anon struct literal to pointer to array" {
- if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
- if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- const S = struct {
- const U = union {
- a: u32,
- b: bool,
- c: []const u8,
- };
-
- fn doTheTest() !void {
- var x1: u8 = 42;
- const t1 = &.{ x1, 56, 54 };
- var arr1: *const [3]u8 = t1;
- try expect(arr1[0] == 42);
- try expect(arr1[1] == 56);
- try expect(arr1[2] == 54);
-
- var x2: U = .{ .a = 42 };
- const t2 = &.{ x2, .{ .b = true }, .{ .c = "hello" } };
- var arr2: *const [3]U = t2;
- try expect(arr2[0].a == 42);
- try expect(arr2[1].b == true);
- try expect(mem.eql(u8, arr2[2].c, "hello"));
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 79f75f773c..4028d8c5f1 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -1,8 +1,9 @@
+const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const mem = std.mem;
const maxInt = std.math.maxInt;
-const builtin = @import("builtin");
+const native_endian = builtin.target.cpu.arch.endian();
test "int to ptr cast" {
const x = @as(usize, 13);
@@ -93,7 +94,8 @@ test "comptime_int @intToFloat" {
}
test "@floatToInt" {
- if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
try testFloatToInts();
comptime try testFloatToInts();
@@ -802,3 +804,337 @@ test "comptime float casts" {
try expectFloatToInt(comptime_int, 1234, i16, 1234);
try expectFloatToInt(comptime_float, 12.3, comptime_int, 12);
}
+
+test "pointer reinterpret const float to int" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ // The hex representation is 0x3fe3333333333303.
+ const float: f64 = 5.99999999999994648725e-01;
+ const float_ptr = &float;
+ const int_ptr = @ptrCast(*const i32, float_ptr);
+ const int_val = int_ptr.*;
+ if (native_endian == .Little)
+ try expect(int_val == 0x33333303)
+ else
+ try expect(int_val == 0x3fe33333);
+}
+
+test "implicit cast from [*]T to ?*anyopaque" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var a = [_]u8{ 3, 2, 1 };
+ var runtime_zero: usize = 0;
+ incrementVoidPtrArray(a[runtime_zero..].ptr, 3);
+ try expect(std.mem.eql(u8, &a, &[_]u8{ 4, 3, 2 }));
+}
+
+fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
+ var n: usize = 0;
+ while (n < len) : (n += 1) {
+ @ptrCast([*]u8, array.?)[n] += 1;
+ }
+}
+
+test "compile time int to ptr of function" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ try foobar(FUNCTION_CONSTANT);
+}
+
+pub const FUNCTION_CONSTANT = @intToPtr(PFN_void, maxInt(usize));
+pub const PFN_void = *const fn (*anyopaque) callconv(.C) void;
+
+fn foobar(func: PFN_void) !void {
+ try std.testing.expect(@ptrToInt(func) == maxInt(usize));
+}
+
+test "implicit ptr to *anyopaque" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var a: u32 = 1;
+ var ptr: *align(@alignOf(u32)) anyopaque = &a;
+ var b: *u32 = @ptrCast(*u32, ptr);
+ try expect(b.* == 1);
+ var ptr2: ?*align(@alignOf(u32)) anyopaque = &a;
+ var c: *u32 = @ptrCast(*u32, ptr2.?);
+ try expect(c.* == 1);
+}
+
+test "return null from fn() anyerror!?&T" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const a = returnNullFromOptionalTypeErrorRef();
+ const b = returnNullLitFromOptionalTypeErrorRef();
+ try expect((try a) == null and (try b) == null);
+}
+fn returnNullFromOptionalTypeErrorRef() anyerror!?*A {
+ const a: ?*A = null;
+ return a;
+}
+fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
+ return null;
+}
+
+test "peer type resolution: [0]u8 and []const u8" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
+ try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
+ comptime {
+ try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
+ try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
+ }
+}
+fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
+ if (a) {
+ return &[_]u8{};
+ }
+
+ return slice[0..1];
+}
+
+test "implicitly cast from [N]T to ?[]const T" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
+ comptime try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
+}
+
+fn castToOptionalSlice() ?[]const u8 {
+ return "hi";
+}
+
+test "cast u128 to f128 and back" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ comptime try testCast128();
+ try testCast128();
+}
+
+fn testCast128() !void {
+ try expect(cast128Int(cast128Float(0x7fff0000000000000000000000000000)) == 0x7fff0000000000000000000000000000);
+}
+
+fn cast128Int(x: f128) u128 {
+ return @bitCast(u128, x);
+}
+
+fn cast128Float(x: u128) f128 {
+ return @bitCast(f128, x);
+}
+
+test "implicit cast from *[N]T to ?[*]T" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var x: ?[*]u16 = null;
+ var y: [4]u16 = [4]u16{ 0, 1, 2, 3 };
+
+ x = &y;
+ try expect(std.mem.eql(u16, x.?[0..4], y[0..4]));
+ x.?[0] = 8;
+ y[3] = 6;
+ try expect(std.mem.eql(u16, x.?[0..4], y[0..4]));
+}
+
+test "implicit cast from *T to ?*anyopaque" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var a: u8 = 1;
+ incrementVoidPtrValue(&a);
+ try std.testing.expect(a == 2);
+}
+
+fn incrementVoidPtrValue(value: ?*anyopaque) void {
+ @ptrCast(*u8, value.?).* += 1;
+}
+
+test "implicit cast *[0]T to E![]const u8" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var x = @as(anyerror![]const u8, &[0]u8{});
+ try expect((x catch unreachable).len == 0);
+}
+
+var global_array: [4]u8 = undefined;
+test "cast from array reference to fn: comptime fn ptr" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const f = @ptrCast(*const fn () callconv(.C) void, &global_array);
+ try expect(@ptrToInt(f) == @ptrToInt(&global_array));
+}
+test "cast from array reference to fn: runtime fn ptr" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var f = @ptrCast(*const fn () callconv(.C) void, &global_array);
+ try expect(@ptrToInt(f) == @ptrToInt(&global_array));
+}
+
+test "*const [N]null u8 to ?[]const u8" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var a = "Hello";
+ var b: ?[]const u8 = a;
+ try expect(mem.eql(u8, b.?, "Hello"));
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "cast between [*c]T and ?[*:0]T on fn parameter" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Handler = ?fn ([*c]const u8) callconv(.C) void;
+ fn addCallback(handler: Handler) void {
+ _ = handler;
+ }
+
+ fn myCallback(cstr: ?[*:0]const u8) callconv(.C) void {
+ _ = cstr;
+ }
+
+ fn doTheTest() void {
+ addCallback(myCallback);
+ }
+ };
+ S.doTheTest();
+}
+
+var global_struct: struct { f0: usize } = undefined;
+test "assignment to optional pointer result loc" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct };
+ try expect(foo.ptr.? == @ptrCast(*anyopaque, &global_struct));
+}
+
+test "cast between *[N]void and []void" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var a: [4]void = undefined;
+ var b: []void = &a;
+ try expect(b.len == 4);
+}
+
+test "peer resolve arrays of different size to const slice" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ try expect(mem.eql(u8, boolToStr(true), "true"));
+ try expect(mem.eql(u8, boolToStr(false), "false"));
+ comptime try expect(mem.eql(u8, boolToStr(true), "true"));
+ comptime try expect(mem.eql(u8, boolToStr(false), "false"));
+}
+fn boolToStr(b: bool) []const u8 {
+ return if (b) "true" else "false";
+}
+
+test "cast f16 to wider types" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var x: f16 = 1234.0;
+ try expect(@as(f32, 1234.0) == x);
+ try expect(@as(f64, 1234.0) == x);
+ try expect(@as(f128, 1234.0) == x);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "cast f128 to narrower types" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var x: f128 = 1234.0;
+ try expect(@as(f16, 1234.0) == @floatCast(f16, x));
+ try expect(@as(f32, 1234.0) == @floatCast(f32, x));
+ try expect(@as(f64, 1234.0) == @floatCast(f64, x));
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "peer type resolution: unreachable, null, slice" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest(num: usize, word: []const u8) !void {
+ const result = switch (num) {
+ 0 => null,
+ 1 => word,
+ else => unreachable,
+ };
+ try expect(mem.eql(u8, result.?, "hi"));
+ }
+ };
+ try S.doTheTest(1, "hi");
+}
+
+test "cast i8 fn call peers to i32 result" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var cond = true;
+ const value: i32 = if (cond) smallBoi() else bigBoi();
+ try expect(value == 123);
+ }
+ fn smallBoi() i8 {
+ return 123;
+ }
+ fn bigBoi() i16 {
+ return 1234;
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
diff --git a/test/behavior/cast_llvm.zig b/test/behavior/cast_llvm.zig
deleted file mode 100644
index 6f9b77b8f2..0000000000
--- a/test/behavior/cast_llvm.zig
+++ /dev/null
@@ -1,299 +0,0 @@
-const builtin = @import("builtin");
-const std = @import("std");
-const expect = std.testing.expect;
-const mem = std.mem;
-const maxInt = std.math.maxInt;
-const native_endian = builtin.target.cpu.arch.endian();
-
-test "pointer reinterpret const float to int" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- // The hex representation is 0x3fe3333333333303.
- const float: f64 = 5.99999999999994648725e-01;
- const float_ptr = &float;
- const int_ptr = @ptrCast(*const i32, float_ptr);
- const int_val = int_ptr.*;
- if (native_endian == .Little)
- try expect(int_val == 0x33333303)
- else
- try expect(int_val == 0x3fe33333);
-}
-
-test "@floatToInt" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- try testFloatToInts();
- comptime try testFloatToInts();
-}
-
-fn testFloatToInts() !void {
- try expectFloatToInt(f16, 255.1, u8, 255);
- try expectFloatToInt(f16, 127.2, i8, 127);
- try expectFloatToInt(f16, -128.2, i8, -128);
-}
-
-fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) !void {
- try expect(@floatToInt(I, f) == i);
-}
-
-test "implicit cast from [*]T to ?*anyopaque" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- var a = [_]u8{ 3, 2, 1 };
- var runtime_zero: usize = 0;
- incrementVoidPtrArray(a[runtime_zero..].ptr, 3);
- try expect(std.mem.eql(u8, &a, &[_]u8{ 4, 3, 2 }));
-}
-
-fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
- var n: usize = 0;
- while (n < len) : (n += 1) {
- @ptrCast([*]u8, array.?)[n] += 1;
- }
-}
-
-test "compile time int to ptr of function" {
- if (builtin.zig_backend == .stage1) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- try foobar(FUNCTION_CONSTANT);
-}
-
-pub const FUNCTION_CONSTANT = @intToPtr(PFN_void, maxInt(usize));
-pub const PFN_void = *const fn (*anyopaque) callconv(.C) void;
-
-fn foobar(func: PFN_void) !void {
- try std.testing.expect(@ptrToInt(func) == maxInt(usize));
-}
-
-test "implicit ptr to *anyopaque" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
- var a: u32 = 1;
- var ptr: *align(@alignOf(u32)) anyopaque = &a;
- var b: *u32 = @ptrCast(*u32, ptr);
- try expect(b.* == 1);
- var ptr2: ?*align(@alignOf(u32)) anyopaque = &a;
- var c: *u32 = @ptrCast(*u32, ptr2.?);
- try expect(c.* == 1);
-}
-
-const A = struct {
- a: i32,
-};
-test "return null from fn() anyerror!?&T" {
- const a = returnNullFromOptionalTypeErrorRef();
- const b = returnNullLitFromOptionalTypeErrorRef();
- try expect((try a) == null and (try b) == null);
-}
-fn returnNullFromOptionalTypeErrorRef() anyerror!?*A {
- const a: ?*A = null;
- return a;
-}
-fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
- return null;
-}
-
-test "peer type resolution: [0]u8 and []const u8" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
- try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
- comptime {
- try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
- try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
- }
-}
-fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
- if (a) {
- return &[_]u8{};
- }
-
- return slice[0..1];
-}
-
-test "implicitly cast from [N]T to ?[]const T" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
- comptime try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
-}
-
-fn castToOptionalSlice() ?[]const u8 {
- return "hi";
-}
-
-test "cast u128 to f128 and back" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- comptime try testCast128();
- try testCast128();
-}
-
-fn testCast128() !void {
- try expect(cast128Int(cast128Float(0x7fff0000000000000000000000000000)) == 0x7fff0000000000000000000000000000);
-}
-
-fn cast128Int(x: f128) u128 {
- return @bitCast(u128, x);
-}
-
-fn cast128Float(x: u128) f128 {
- return @bitCast(f128, x);
-}
-
-test "implicit cast from *[N]T to ?[*]T" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- var x: ?[*]u16 = null;
- var y: [4]u16 = [4]u16{ 0, 1, 2, 3 };
-
- x = &y;
- try expect(std.mem.eql(u16, x.?[0..4], y[0..4]));
- x.?[0] = 8;
- y[3] = 6;
- try expect(std.mem.eql(u16, x.?[0..4], y[0..4]));
-}
-
-test "implicit cast from *T to ?*anyopaque" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- var a: u8 = 1;
- incrementVoidPtrValue(&a);
- try std.testing.expect(a == 2);
-}
-
-fn incrementVoidPtrValue(value: ?*anyopaque) void {
- @ptrCast(*u8, value.?).* += 1;
-}
-
-test "implicit cast *[0]T to E![]const u8" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- var x = @as(anyerror![]const u8, &[0]u8{});
- try expect((x catch unreachable).len == 0);
-}
-
-var global_array: [4]u8 = undefined;
-test "cast from array reference to fn: comptime fn ptr" {
- const f = @ptrCast(*const fn () callconv(.C) void, &global_array);
- try expect(@ptrToInt(f) == @ptrToInt(&global_array));
-}
-test "cast from array reference to fn: runtime fn ptr" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- var f = @ptrCast(*const fn () callconv(.C) void, &global_array);
- try expect(@ptrToInt(f) == @ptrToInt(&global_array));
-}
-
-test "*const [N]null u8 to ?[]const u8" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- fn doTheTest() !void {
- var a = "Hello";
- var b: ?[]const u8 = a;
- try expect(mem.eql(u8, b.?, "Hello"));
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "cast between [*c]T and ?[*:0]T on fn parameter" {
- const S = struct {
- const Handler = ?fn ([*c]const u8) callconv(.C) void;
- fn addCallback(handler: Handler) void {
- _ = handler;
- }
-
- fn myCallback(cstr: ?[*:0]const u8) callconv(.C) void {
- _ = cstr;
- }
-
- fn doTheTest() void {
- addCallback(myCallback);
- }
- };
- S.doTheTest();
-}
-
-var global_struct: struct { f0: usize } = undefined;
-test "assignment to optional pointer result loc" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct };
- try expect(foo.ptr.? == @ptrCast(*anyopaque, &global_struct));
-}
-
-test "cast between *[N]void and []void" {
- var a: [4]void = undefined;
- var b: []void = &a;
- try expect(b.len == 4);
-}
-
-test "peer resolve arrays of different size to const slice" {
- try expect(mem.eql(u8, boolToStr(true), "true"));
- try expect(mem.eql(u8, boolToStr(false), "false"));
- comptime try expect(mem.eql(u8, boolToStr(true), "true"));
- comptime try expect(mem.eql(u8, boolToStr(false), "false"));
-}
-fn boolToStr(b: bool) []const u8 {
- return if (b) "true" else "false";
-}
-
-test "cast f16 to wider types" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- const S = struct {
- fn doTheTest() !void {
- var x: f16 = 1234.0;
- try expect(@as(f32, 1234.0) == x);
- try expect(@as(f64, 1234.0) == x);
- try expect(@as(f128, 1234.0) == x);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "cast f128 to narrower types" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-
- const S = struct {
- fn doTheTest() !void {
- var x: f128 = 1234.0;
- try expect(@as(f16, 1234.0) == @floatCast(f16, x));
- try expect(@as(f32, 1234.0) == @floatCast(f32, x));
- try expect(@as(f64, 1234.0) == @floatCast(f64, x));
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "peer type resolution: unreachable, null, slice" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- const S = struct {
- fn doTheTest(num: usize, word: []const u8) !void {
- const result = switch (num) {
- 0 => null,
- 1 => word,
- else => unreachable,
- };
- try expect(mem.eql(u8, result.?, "hi"));
- }
- };
- try S.doTheTest(1, "hi");
-}
-
-test "cast i8 fn call peers to i32 result" {
- const S = struct {
- fn doTheTest() !void {
- var cond = true;
- const value: i32 = if (cond) smallBoi() else bigBoi();
- try expect(value == 123);
- }
- fn smallBoi() i8 {
- return 123;
- }
- fn bigBoi() i16 {
- return 1234;
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index d58ad6ccb5..2e243d1d23 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -478,3 +478,29 @@ test "error union comptime caching" {
S.quux(@as(anyerror!void, {}));
S.quux(@as(anyerror!void, {}));
}
+
+test "@errorName" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ try expect(mem.eql(u8, @errorName(error.AnError), "AnError"));
+ try expect(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
+ try expect(mem.eql(u8, @errorName(gimmeItBroke()), "ItBroke"));
+}
+fn gimmeItBroke() anyerror {
+ return error.ItBroke;
+}
+
+test "@errorName sentinel length matches slice length" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const name = testBuiltinErrorName(error.FooBar);
+ const length: usize = 6;
+ try expect(length == std.mem.indexOfSentinel(u8, 0, name.ptr));
+ try expect(length == name.len);
+}
+
+pub fn testBuiltinErrorName(err: anyerror) [:0]const u8 {
+ return @errorName(err);
+}
diff --git a/test/behavior/error_llvm.zig b/test/behavior/error_llvm.zig
deleted file mode 100644
index edebd5f629..0000000000
--- a/test/behavior/error_llvm.zig
+++ /dev/null
@@ -1,24 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const mem = std.mem;
-
-fn gimmeItBroke() anyerror {
- return error.ItBroke;
-}
-
-test "@errorName" {
- try expect(mem.eql(u8, @errorName(error.AnError), "AnError"));
- try expect(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
- try expect(mem.eql(u8, @errorName(gimmeItBroke()), "ItBroke"));
-}
-
-test "@errorName sentinel length matches slice length" {
- const name = testBuiltinErrorName(error.FooBar);
- const length: usize = 6;
- try expect(length == std.mem.indexOfSentinel(u8, 0, name.ptr));
- try expect(length == name.len);
-}
-
-pub fn testBuiltinErrorName(err: anyerror) [:0]const u8 {
- return @errorName(err);
-}
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index 2dbd4b3495..f359fe458e 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -1,3 +1,4 @@
+const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
@@ -160,3 +161,115 @@ test "@bitOffsetOf" {
try expect(@offsetOf(A, "f") * 8 == @bitOffsetOf(A, "f"));
try expect(@offsetOf(A, "g") * 8 == @bitOffsetOf(A, "g"));
}
+
+test "@sizeOf(T) == 0 doesn't force resolving struct size" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Foo = struct {
+ y: if (@sizeOf(Foo) == 0) u64 else u32,
+ };
+ const Bar = struct {
+ x: i32,
+ y: if (0 == @sizeOf(Bar)) u64 else u32,
+ };
+ };
+
+ try expect(@sizeOf(S.Foo) == 4);
+ try expect(@sizeOf(S.Bar) == 8);
+}
+
+test "@TypeOf() has no runtime side effects" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn foo(comptime T: type, ptr: *T) T {
+ ptr.* += 1;
+ return ptr.*;
+ }
+ };
+ var data: i32 = 0;
+ const T = @TypeOf(S.foo(i32, &data));
+ comptime try expect(T == i32);
+ try expect(data == 0);
+}
+
+test "branching logic inside @TypeOf" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ var data: i32 = 0;
+ fn foo() anyerror!i32 {
+ data += 1;
+ return undefined;
+ }
+ };
+ const T = @TypeOf(S.foo() catch undefined);
+ comptime try expect(T == i32);
+ try expect(S.data == 0);
+}
+
+test "@bitSizeOf" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try expect(@bitSizeOf(u2) == 2);
+ try expect(@bitSizeOf(u8) == @sizeOf(u8) * 8);
+ try expect(@bitSizeOf(struct {
+ a: u2,
+ }) == 8);
+ try expect(@bitSizeOf(packed struct {
+ a: u2,
+ }) == 2);
+}
+
+test "@sizeOf comparison against zero" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S0 = struct {
+ f: *@This(),
+ };
+ const U0 = union {
+ f: *@This(),
+ };
+ const S1 = struct {
+ fn H(comptime T: type) type {
+ return struct {
+ x: T,
+ };
+ }
+ f0: H(*@This()),
+ f1: H(**@This()),
+ f2: H(***@This()),
+ };
+ const U1 = union {
+ fn H(comptime T: type) type {
+ return struct {
+ x: T,
+ };
+ }
+ f0: H(*@This()),
+ f1: H(**@This()),
+ f2: H(***@This()),
+ };
+ const S = struct {
+ fn doTheTest(comptime T: type, comptime result: bool) !void {
+ try expectEqual(result, @sizeOf(T) > 0);
+ }
+ };
+ // Zero-sized type
+ try S.doTheTest(u0, false);
+ try S.doTheTest(*u0, false);
+ // Non byte-sized type
+ try S.doTheTest(u1, true);
+ try S.doTheTest(*u1, true);
+ // Regular type
+ try S.doTheTest(u8, true);
+ try S.doTheTest(*u8, true);
+ try S.doTheTest(f32, true);
+ try S.doTheTest(*f32, true);
+ // Container with ptr pointing to themselves
+ try S.doTheTest(S0, true);
+ try S.doTheTest(U0, true);
+ try S.doTheTest(S1, true);
+ try S.doTheTest(U1, true);
+}
diff --git a/test/behavior/sizeof_and_typeof_stage1.zig b/test/behavior/sizeof_and_typeof_stage1.zig
deleted file mode 100644
index 20cefef0e7..0000000000
--- a/test/behavior/sizeof_and_typeof_stage1.zig
+++ /dev/null
@@ -1,105 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const expectEqual = std.testing.expectEqual;
-
-test "@sizeOf(T) == 0 doesn't force resolving struct size" {
- const S = struct {
- const Foo = struct {
- y: if (@sizeOf(Foo) == 0) u64 else u32,
- };
- const Bar = struct {
- x: i32,
- y: if (0 == @sizeOf(Bar)) u64 else u32,
- };
- };
-
- try expect(@sizeOf(S.Foo) == 4);
- try expect(@sizeOf(S.Bar) == 8);
-}
-
-test "@TypeOf() has no runtime side effects" {
- const S = struct {
- fn foo(comptime T: type, ptr: *T) T {
- ptr.* += 1;
- return ptr.*;
- }
- };
- var data: i32 = 0;
- const T = @TypeOf(S.foo(i32, &data));
- comptime try expect(T == i32);
- try expect(data == 0);
-}
-
-test "branching logic inside @TypeOf" {
- const S = struct {
- var data: i32 = 0;
- fn foo() anyerror!i32 {
- data += 1;
- return undefined;
- }
- };
- const T = @TypeOf(S.foo() catch undefined);
- comptime try expect(T == i32);
- try expect(S.data == 0);
-}
-
-test "@bitSizeOf" {
- try expect(@bitSizeOf(u2) == 2);
- try expect(@bitSizeOf(u8) == @sizeOf(u8) * 8);
- try expect(@bitSizeOf(struct {
- a: u2,
- }) == 8);
- try expect(@bitSizeOf(packed struct {
- a: u2,
- }) == 2);
-}
-
-test "@sizeOf comparison against zero" {
- const S0 = struct {
- f: *@This(),
- };
- const U0 = union {
- f: *@This(),
- };
- const S1 = struct {
- fn H(comptime T: type) type {
- return struct {
- x: T,
- };
- }
- f0: H(*@This()),
- f1: H(**@This()),
- f2: H(***@This()),
- };
- const U1 = union {
- fn H(comptime T: type) type {
- return struct {
- x: T,
- };
- }
- f0: H(*@This()),
- f1: H(**@This()),
- f2: H(***@This()),
- };
- const S = struct {
- fn doTheTest(comptime T: type, comptime result: bool) !void {
- try expectEqual(result, @sizeOf(T) > 0);
- }
- };
- // Zero-sized type
- try S.doTheTest(u0, false);
- try S.doTheTest(*u0, false);
- // Non byte-sized type
- try S.doTheTest(u1, true);
- try S.doTheTest(*u1, true);
- // Regular type
- try S.doTheTest(u8, true);
- try S.doTheTest(*u8, true);
- try S.doTheTest(f32, true);
- try S.doTheTest(*f32, true);
- // Container with ptr pointing to themselves
- try S.doTheTest(S0, true);
- try S.doTheTest(U0, true);
- try S.doTheTest(S1, true);
- try S.doTheTest(U1, true);
-}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 03be28b9d1..ecdd6a1846 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -133,15 +133,6 @@ fn returnEmptyStructInstance() StructWithNoFields {
return empty_global_instance;
}
-const Node = struct {
- val: Val,
- next: *Node,
-};
-
-const Val = struct {
- x: i32,
-};
-
test "fn call of struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -294,3 +285,888 @@ const blah: packed struct {
test "bit field alignment" {
try expect(@TypeOf(&blah.b) == *align(1:3:1) const u3);
}
+
+const Node = struct {
+ val: Val,
+ next: *Node,
+};
+
+const Val = struct {
+ x: i32,
+};
+
+test "struct point to self" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var root: Node = undefined;
+ root.val.x = 1;
+
+ var node: Node = undefined;
+ node.next = &root;
+ node.val.x = 2;
+
+ root.next = &node;
+
+ try expect(node.next.next.next.val.x == 1);
+}
+
+test "void struct fields" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const foo = VoidStructFieldsFoo{
+ .a = void{},
+ .b = 1,
+ .c = void{},
+ };
+ try expect(foo.b == 1);
+ try expect(@sizeOf(VoidStructFieldsFoo) == 4);
+}
+const VoidStructFieldsFoo = struct {
+ a: void,
+ b: i32,
+ c: void,
+};
+
+test "return empty struct from fn" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ _ = testReturnEmptyStructFromFn();
+}
+const EmptyStruct2 = struct {};
+fn testReturnEmptyStructFromFn() EmptyStruct2 {
+ return EmptyStruct2{};
+}
+
+test "pass slice of empty struct to fn" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ try expect(testPassSliceOfEmptyStructToFn(&[_]EmptyStruct2{EmptyStruct2{}}) == 1);
+}
+fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
+ return slice.len;
+}
+
+test "self-referencing struct via array member" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const T = struct {
+ children: [1]*@This(),
+ };
+ var x: T = undefined;
+ x = T{ .children = .{&x} };
+ try expect(x.children[0] == &x);
+}
+
+test "empty struct method call" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const es = EmptyStruct{};
+ try expect(es.method() == 1234);
+}
+const EmptyStruct = struct {
+ fn method(es: *const EmptyStruct) i32 {
+ _ = es;
+ return 1234;
+ }
+};
+
+test "align 1 field before self referential align 8 field as slice return type" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const result = alloc(Expr);
+ try expect(result.len == 0);
+}
+
+const Expr = union(enum) {
+ Literal: u8,
+ Question: *Expr,
+};
+
+fn alloc(comptime T: type) []T {
+ return &[_]T{};
+}
+
+const APackedStruct = packed struct {
+ x: u8,
+ y: u8,
+};
+
+test "packed struct" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var foo = APackedStruct{
+ .x = 1,
+ .y = 2,
+ };
+ foo.y += 1;
+ const four = foo.x + foo.y;
+ try expect(four == 4);
+}
+
+const Foo24Bits = packed struct {
+ field: u24,
+};
+const Foo96Bits = packed struct {
+ a: u24,
+ b: u24,
+ c: u24,
+ d: u24,
+};
+
+test "packed struct 24bits" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ comptime {
+ try expect(@sizeOf(Foo24Bits) == 4);
+ if (@sizeOf(usize) == 4) {
+ try expect(@sizeOf(Foo96Bits) == 12);
+ } else {
+ try expect(@sizeOf(Foo96Bits) == 16);
+ }
+ }
+
+ var value = Foo96Bits{
+ .a = 0,
+ .b = 0,
+ .c = 0,
+ .d = 0,
+ };
+ value.a += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 0);
+ try expect(value.c == 0);
+ try expect(value.d == 0);
+
+ value.b += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 1);
+ try expect(value.c == 0);
+ try expect(value.d == 0);
+
+ value.c += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 1);
+ try expect(value.c == 1);
+ try expect(value.d == 0);
+
+ value.d += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 1);
+ try expect(value.c == 1);
+ try expect(value.d == 1);
+}
+
+test "runtime struct initialization of bitfield" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const s1 = Nibbles{
+ .x = x1,
+ .y = x1,
+ };
+ const s2 = Nibbles{
+ .x = @intCast(u4, x2),
+ .y = @intCast(u4, x2),
+ };
+
+ try expect(s1.x == x1);
+ try expect(s1.y == x1);
+ try expect(s2.x == @intCast(u4, x2));
+ try expect(s2.y == @intCast(u4, x2));
+}
+
+var x1 = @as(u4, 1);
+var x2 = @as(u8, 2);
+
+const Nibbles = packed struct {
+ x: u4,
+ y: u4,
+};
+
+const Bitfields = packed struct {
+ f1: u16,
+ f2: u16,
+ f3: u8,
+ f4: u8,
+ f5: u4,
+ f6: u4,
+ f7: u8,
+};
+
+test "native bit field understands endianness" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var all: u64 = if (native_endian != .Little)
+ 0x1111222233445677
+ else
+ 0x7765443322221111;
+ var bytes: [8]u8 = undefined;
+ @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
+ var bitfields = @ptrCast(*Bitfields, &bytes).*;
+
+ try expect(bitfields.f1 == 0x1111);
+ try expect(bitfields.f2 == 0x2222);
+ try expect(bitfields.f3 == 0x33);
+ try expect(bitfields.f4 == 0x44);
+ try expect(bitfields.f5 == 0x5);
+ try expect(bitfields.f6 == 0x6);
+ try expect(bitfields.f7 == 0x77);
+}
+
+test "implicit cast packed struct field to const ptr" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const LevelUpMove = packed struct {
+ move_id: u9,
+ level: u7,
+
+ fn toInt(value: u7) u7 {
+ return value;
+ }
+ };
+
+ var lup: LevelUpMove = undefined;
+ lup.level = 12;
+ const res = LevelUpMove.toInt(lup.level);
+ try expect(res == 12);
+}
+
+test "zero-bit field in packed struct" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = packed struct {
+ x: u10,
+ y: void,
+ };
+ var x: S = undefined;
+ _ = x;
+}
+
+test "packed struct with non-ABI-aligned field" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = packed struct {
+ x: u9,
+ y: u183,
+ };
+ var s: S = undefined;
+ s.x = 1;
+ s.y = 42;
+ try expect(s.x == 1);
+ try expect(s.y == 42);
+}
+
+const BitField1 = packed struct {
+ a: u3,
+ b: u3,
+ c: u2,
+};
+
+const bit_field_1 = BitField1{
+ .a = 1,
+ .b = 2,
+ .c = 3,
+};
+
+test "bit field access" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var data = bit_field_1;
+ try expect(getA(&data) == 1);
+ try expect(getB(&data) == 2);
+ try expect(getC(&data) == 3);
+ comptime try expect(@sizeOf(BitField1) == 1);
+
+ data.b += 1;
+ try expect(data.b == 3);
+
+ data.a += 1;
+ try expect(data.a == 2);
+ try expect(data.b == 3);
+}
+
+fn getA(data: *const BitField1) u3 {
+ return data.a;
+}
+
+fn getB(data: *const BitField1) u3 {
+ return data.b;
+}
+
+fn getC(data: *const BitField1) u2 {
+ return data.c;
+}
+
+test "default struct initialization fields" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ a: i32 = 1234,
+ b: i32,
+ };
+ const x = S{
+ .b = 5,
+ };
+ var five: i32 = 5;
+ const y = S{
+ .b = five,
+ };
+ if (x.a + x.b != 1239) {
+ @compileError("it should be comptime known");
+ }
+ try expect(y.a == x.a);
+ try expect(y.b == x.b);
+ try expect(1239 == x.a + x.b);
+}
+
+// TODO revisit this test when doing https://github.com/ziglang/zig/issues/1512
+test "packed array 24bits" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ comptime {
+ try expect(@sizeOf([9]Foo32Bits) == 9 * 4);
+ try expect(@sizeOf(FooArray24Bits) == 2 + 2 * 4 + 2);
+ }
+
+ var bytes = [_]u8{0} ** (@sizeOf(FooArray24Bits) + 1);
+ bytes[bytes.len - 1] = 0xaa;
+ const ptr = &std.mem.bytesAsSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
+ try expect(ptr.a == 0);
+ try expect(ptr.b[0].field == 0);
+ try expect(ptr.b[1].field == 0);
+ try expect(ptr.c == 0);
+
+ ptr.a = maxInt(u16);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == 0);
+ try expect(ptr.b[1].field == 0);
+ try expect(ptr.c == 0);
+
+ ptr.b[0].field = maxInt(u24);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == maxInt(u24));
+ try expect(ptr.b[1].field == 0);
+ try expect(ptr.c == 0);
+
+ ptr.b[1].field = maxInt(u24);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == maxInt(u24));
+ try expect(ptr.b[1].field == maxInt(u24));
+ try expect(ptr.c == 0);
+
+ ptr.c = maxInt(u16);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == maxInt(u24));
+ try expect(ptr.b[1].field == maxInt(u24));
+ try expect(ptr.c == maxInt(u16));
+
+ try expect(bytes[bytes.len - 1] == 0xaa);
+}
+
+const Foo32Bits = packed struct {
+ field: u24,
+ pad: u8,
+};
+
+const FooArray24Bits = packed struct {
+ a: u16,
+ b: [2]Foo32Bits,
+ c: u16,
+};
+
+test "aligned array of packed struct" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ comptime {
+ try expect(@sizeOf(FooStructAligned) == 2);
+ try expect(@sizeOf(FooArrayOfAligned) == 2 * 2);
+ }
+
+ var bytes = [_]u8{0xbb} ** @sizeOf(FooArrayOfAligned);
+ const ptr = &std.mem.bytesAsSlice(FooArrayOfAligned, bytes[0..])[0];
+
+ try expect(ptr.a[0].a == 0xbb);
+ try expect(ptr.a[0].b == 0xbb);
+ try expect(ptr.a[1].a == 0xbb);
+ try expect(ptr.a[1].b == 0xbb);
+}
+
+const FooStructAligned = packed struct {
+ a: u8,
+ b: u8,
+};
+
+const FooArrayOfAligned = packed struct {
+ a: [2]FooStructAligned,
+};
+
+test "pointer to packed struct member in a stack variable" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = packed struct {
+ a: u2,
+ b: u2,
+ };
+
+ var s = S{ .a = 2, .b = 0 };
+ var b_ptr = &s.b;
+ try expect(s.b == 0);
+ b_ptr.* = 2;
+ try expect(s.b == 2);
+}
+
+test "non-byte-aligned array inside packed struct" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Foo = packed struct {
+ a: bool,
+ b: [0x16]u8,
+ };
+ const S = struct {
+ fn bar(slice: []const u8) !void {
+ try expectEqualSlices(u8, slice, "abcdefghijklmnopqurstu");
+ }
+ fn doTheTest() !void {
+ var foo = Foo{
+ .a = true,
+ .b = "abcdefghijklmnopqurstu".*,
+ };
+ const value = foo.b;
+ try bar(&value);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "packed struct with u0 field access" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = packed struct {
+ f0: u0,
+ };
+ var s = S{ .f0 = 0 };
+ comptime try expect(s.f0 == 0);
+}
+
+test "access to global struct fields" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ g_foo.bar.value = 42;
+ try expect(g_foo.bar.value == 42);
+}
+
+const S0 = struct {
+ bar: S1,
+
+ pub const S1 = struct {
+ value: u8,
+ };
+
+ fn init() @This() {
+ return S0{ .bar = S1{ .value = 123 } };
+ }
+};
+
+var g_foo: S0 = S0.init();
+
+test "packed struct with fp fields" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = packed struct {
+ data: [3]f32,
+
+ pub fn frob(self: *@This()) void {
+ self.data[0] += self.data[1] + self.data[2];
+ self.data[1] += self.data[0] + self.data[2];
+ self.data[2] += self.data[0] + self.data[1];
+ }
+ };
+
+ var s: S = undefined;
+ s.data[0] = 1.0;
+ s.data[1] = 2.0;
+ s.data[2] = 3.0;
+ s.frob();
+ try expectEqual(@as(f32, 6.0), s.data[0]);
+ try expectEqual(@as(f32, 11.0), s.data[1]);
+ try expectEqual(@as(f32, 20.0), s.data[2]);
+}
+
+test "fn with C calling convention returns struct by value" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn entry() !void {
+ var x = makeBar(10);
+ try expectEqual(@as(i32, 10), x.handle);
+ }
+
+ const ExternBar = extern struct {
+ handle: i32,
+ };
+
+ fn makeBar(t: i32) callconv(.C) ExternBar {
+ return ExternBar{
+ .handle = t,
+ };
+ }
+ };
+ try S.entry();
+ comptime try S.entry();
+}
+
+test "non-packed struct with u128 entry in union" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const U = union(enum) {
+ Num: u128,
+ Void,
+ };
+
+ const S = struct {
+ f1: U,
+ f2: U,
+ };
+
+ var sx: S = undefined;
+ var s = &sx;
+ try std.testing.expect(@ptrToInt(&s.f2) - @ptrToInt(&s.f1) == @offsetOf(S, "f2"));
+ var v2 = U{ .Num = 123 };
+ s.f2 = v2;
+ try std.testing.expect(s.f2.Num == 123);
+}
+
+test "packed struct field passed to generic function" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const P = packed struct {
+ b: u5,
+ g: u5,
+ r: u5,
+ a: u1,
+ };
+
+ fn genericReadPackedField(ptr: anytype) u5 {
+ return ptr.*;
+ }
+ };
+
+ var p: S.P = undefined;
+ p.b = 29;
+ var loaded = S.genericReadPackedField(&p.b);
+ try expect(loaded == 29);
+}
+
+test "anonymous struct literal syntax" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Point = struct {
+ x: i32,
+ y: i32,
+ };
+
+ fn doTheTest() !void {
+ var p: Point = .{
+ .x = 1,
+ .y = 2,
+ };
+ try expect(p.x == 1);
+ try expect(p.y == 2);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "fully anonymous struct" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ try dump(.{
+ .int = @as(u32, 1234),
+ .float = @as(f64, 12.34),
+ .b = true,
+ .s = "hi",
+ });
+ }
+ fn dump(args: anytype) !void {
+ try expect(args.int == 1234);
+ try expect(args.float == 12.34);
+ try expect(args.b);
+ try expect(args.s[0] == 'h');
+ try expect(args.s[1] == 'i');
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "fully anonymous list literal" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" });
+ }
+ fn dump(args: anytype) !void {
+ try expect(args.@"0" == 1234);
+ try expect(args.@"1" == 12.34);
+ try expect(args.@"2");
+ try expect(args.@"3"[0] == 'h');
+ try expect(args.@"3"[1] == 'i');
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "anonymous struct literal assigned to variable" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ var vec = .{ @as(i32, 22), @as(i32, 55), @as(i32, 99) };
+ try expect(vec.@"0" == 22);
+ try expect(vec.@"1" == 55);
+ try expect(vec.@"2" == 99);
+}
+
+test "comptime struct field" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const T = struct {
+ a: i32,
+ comptime b: i32 = 1234,
+ };
+
+ var foo: T = undefined;
+ comptime try expect(foo.b == 1234);
+}
+
+test "anon struct literal field value initialized with fn call" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn doTheTest() !void {
+ var x = .{foo()};
+ try expectEqualSlices(u8, x[0], "hi");
+ }
+ fn foo() []const u8 {
+ return "hi";
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "struct with union field" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Value = struct {
+ ref: u32 = 2,
+ kind: union(enum) {
+ None: usize,
+ Bool: bool,
+ },
+ };
+
+ var True = Value{
+ .kind = .{ .Bool = true },
+ };
+ try expectEqual(@as(u32, 2), True.ref);
+ try expectEqual(true, True.kind.Bool);
+}
+
+test "type coercion of anon struct literal to struct" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const S2 = struct {
+ A: u32,
+ B: []const u8,
+ C: void,
+ D: Foo = .{},
+ };
+
+ const Foo = struct {
+ field: i32 = 1234,
+ };
+
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = .{ .A = 123, .B = "foo", .C = {} };
+ const t1 = .{ .A = y, .B = "foo", .C = {} };
+ const y0: S2 = t0;
+ var y1: S2 = t1;
+ try expect(y0.A == 123);
+ try expect(std.mem.eql(u8, y0.B, "foo"));
+ try expect(y0.C == {});
+ try expect(y0.D.field == 1234);
+ try expect(y1.A == y);
+ try expect(std.mem.eql(u8, y1.B, "foo"));
+ try expect(y1.C == {});
+ try expect(y1.D.field == 1234);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "type coercion of pointer to anon struct literal to pointer to struct" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const S2 = struct {
+ A: u32,
+ B: []const u8,
+ C: void,
+ D: Foo = .{},
+ };
+
+ const Foo = struct {
+ field: i32 = 1234,
+ };
+
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = &.{ .A = 123, .B = "foo", .C = {} };
+ const t1 = &.{ .A = y, .B = "foo", .C = {} };
+ const y0: *const S2 = t0;
+ var y1: *const S2 = t1;
+ try expect(y0.A == 123);
+ try expect(std.mem.eql(u8, y0.B, "foo"));
+ try expect(y0.C == {});
+ try expect(y0.D.field == 1234);
+ try expect(y1.A == y);
+ try expect(std.mem.eql(u8, y1.B, "foo"));
+ try expect(y1.C == {});
+ try expect(y1.D.field == 1234);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "packed struct with undefined initializers" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const P = packed struct {
+ a: u3,
+ _a: u3 = undefined,
+ b: u3,
+ _b: u3 = undefined,
+ c: u3,
+ _c: u3 = undefined,
+ };
+
+ fn doTheTest() !void {
+ var p: P = undefined;
+ p = P{ .a = 2, .b = 4, .c = 6 };
+ // Make sure the compiler doesn't touch the unprefixed fields.
+ // Use expect since i386-linux doesn't like expectEqual
+ try expect(p.a == 2);
+ try expect(p.b == 4);
+ try expect(p.c == 6);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "for loop over pointers to struct, getting field from struct pointer" {
+ // When enabling this test, be careful. I have observed it to pass when compiling
+ // stage2 alone, but when using stage1 with -fno-stage1 -fLLVM it fails.
+ // Maybe eyeball the LLVM that it generates and run in valgrind, both the compiler
+ // and the generated test at runtime.
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ const Foo = struct {
+ name: []const u8,
+ };
+
+ var ok = true;
+
+ fn eql(a: []const u8) bool {
+ _ = a;
+ return true;
+ }
+
+ const ArrayList = struct {
+ fn toSlice(self: *ArrayList) []*Foo {
+ _ = self;
+ return @as([*]*Foo, undefined)[0..0];
+ }
+ };
+
+ fn doTheTest() !void {
+ var objects: ArrayList = undefined;
+
+ for (objects.toSlice()) |obj| {
+ if (eql(obj.name)) {
+ ok = false;
+ }
+ }
+
+ try expect(ok);
+ }
+ };
+ try S.doTheTest();
+}
diff --git a/test/behavior/struct_llvm.zig b/test/behavior/struct_llvm.zig
deleted file mode 100644
index eef7b16487..0000000000
--- a/test/behavior/struct_llvm.zig
+++ /dev/null
@@ -1,802 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const native_endian = builtin.target.cpu.arch.endian();
-const expect = std.testing.expect;
-const expectEqual = std.testing.expectEqual;
-const expectEqualSlices = std.testing.expectEqualSlices;
-const maxInt = std.math.maxInt;
-
-const Node = struct {
- val: Val,
- next: *Node,
-};
-
-const Val = struct {
- x: i32,
-};
-
-test "struct point to self" {
- var root: Node = undefined;
- root.val.x = 1;
-
- var node: Node = undefined;
- node.next = &root;
- node.val.x = 2;
-
- root.next = &node;
-
- try expect(node.next.next.next.val.x == 1);
-}
-
-test "void struct fields" {
- const foo = VoidStructFieldsFoo{
- .a = void{},
- .b = 1,
- .c = void{},
- };
- try expect(foo.b == 1);
- try expect(@sizeOf(VoidStructFieldsFoo) == 4);
-}
-const VoidStructFieldsFoo = struct {
- a: void,
- b: i32,
- c: void,
-};
-
-test "return empty struct from fn" {
- _ = testReturnEmptyStructFromFn();
-}
-const EmptyStruct2 = struct {};
-fn testReturnEmptyStructFromFn() EmptyStruct2 {
- return EmptyStruct2{};
-}
-
-test "pass slice of empty struct to fn" {
- try expect(testPassSliceOfEmptyStructToFn(&[_]EmptyStruct2{EmptyStruct2{}}) == 1);
-}
-fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
- return slice.len;
-}
-
-test "self-referencing struct via array member" {
- const T = struct {
- children: [1]*@This(),
- };
- var x: T = undefined;
- x = T{ .children = .{&x} };
- try expect(x.children[0] == &x);
-}
-
-test "empty struct method call" {
- const es = EmptyStruct{};
- try expect(es.method() == 1234);
-}
-const EmptyStruct = struct {
- fn method(es: *const EmptyStruct) i32 {
- _ = es;
- return 1234;
- }
-};
-
-test "align 1 field before self referential align 8 field as slice return type" {
- const result = alloc(Expr);
- try expect(result.len == 0);
-}
-
-const Expr = union(enum) {
- Literal: u8,
- Question: *Expr,
-};
-
-fn alloc(comptime T: type) []T {
- return &[_]T{};
-}
-
-const APackedStruct = packed struct {
- x: u8,
- y: u8,
-};
-
-test "packed struct" {
- var foo = APackedStruct{
- .x = 1,
- .y = 2,
- };
- foo.y += 1;
- const four = foo.x + foo.y;
- try expect(four == 4);
-}
-
-const Foo24Bits = packed struct {
- field: u24,
-};
-const Foo96Bits = packed struct {
- a: u24,
- b: u24,
- c: u24,
- d: u24,
-};
-
-test "packed struct 24bits" {
- comptime {
- try expect(@sizeOf(Foo24Bits) == 4);
- if (@sizeOf(usize) == 4) {
- try expect(@sizeOf(Foo96Bits) == 12);
- } else {
- try expect(@sizeOf(Foo96Bits) == 16);
- }
- }
-
- var value = Foo96Bits{
- .a = 0,
- .b = 0,
- .c = 0,
- .d = 0,
- };
- value.a += 1;
- try expect(value.a == 1);
- try expect(value.b == 0);
- try expect(value.c == 0);
- try expect(value.d == 0);
-
- value.b += 1;
- try expect(value.a == 1);
- try expect(value.b == 1);
- try expect(value.c == 0);
- try expect(value.d == 0);
-
- value.c += 1;
- try expect(value.a == 1);
- try expect(value.b == 1);
- try expect(value.c == 1);
- try expect(value.d == 0);
-
- value.d += 1;
- try expect(value.a == 1);
- try expect(value.b == 1);
- try expect(value.c == 1);
- try expect(value.d == 1);
-}
-
-test "runtime struct initialization of bitfield" {
- const s1 = Nibbles{
- .x = x1,
- .y = x1,
- };
- const s2 = Nibbles{
- .x = @intCast(u4, x2),
- .y = @intCast(u4, x2),
- };
-
- try expect(s1.x == x1);
- try expect(s1.y == x1);
- try expect(s2.x == @intCast(u4, x2));
- try expect(s2.y == @intCast(u4, x2));
-}
-
-var x1 = @as(u4, 1);
-var x2 = @as(u8, 2);
-
-const Nibbles = packed struct {
- x: u4,
- y: u4,
-};
-
-const Bitfields = packed struct {
- f1: u16,
- f2: u16,
- f3: u8,
- f4: u8,
- f5: u4,
- f6: u4,
- f7: u8,
-};
-
-test "native bit field understands endianness" {
- var all: u64 = if (native_endian != .Little)
- 0x1111222233445677
- else
- 0x7765443322221111;
- var bytes: [8]u8 = undefined;
- @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
- var bitfields = @ptrCast(*Bitfields, &bytes).*;
-
- try expect(bitfields.f1 == 0x1111);
- try expect(bitfields.f2 == 0x2222);
- try expect(bitfields.f3 == 0x33);
- try expect(bitfields.f4 == 0x44);
- try expect(bitfields.f5 == 0x5);
- try expect(bitfields.f6 == 0x6);
- try expect(bitfields.f7 == 0x77);
-}
-
-test "implicit cast packed struct field to const ptr" {
- const LevelUpMove = packed struct {
- move_id: u9,
- level: u7,
-
- fn toInt(value: u7) u7 {
- return value;
- }
- };
-
- var lup: LevelUpMove = undefined;
- lup.level = 12;
- const res = LevelUpMove.toInt(lup.level);
- try expect(res == 12);
-}
-
-test "zero-bit field in packed struct" {
- const S = packed struct {
- x: u10,
- y: void,
- };
- var x: S = undefined;
- _ = x;
-}
-
-test "packed struct with non-ABI-aligned field" {
- const S = packed struct {
- x: u9,
- y: u183,
- };
- var s: S = undefined;
- s.x = 1;
- s.y = 42;
- try expect(s.x == 1);
- try expect(s.y == 42);
-}
-
-const BitField1 = packed struct {
- a: u3,
- b: u3,
- c: u2,
-};
-
-const bit_field_1 = BitField1{
- .a = 1,
- .b = 2,
- .c = 3,
-};
-
-test "bit field access" {
- var data = bit_field_1;
- try expect(getA(&data) == 1);
- try expect(getB(&data) == 2);
- try expect(getC(&data) == 3);
- comptime try expect(@sizeOf(BitField1) == 1);
-
- data.b += 1;
- try expect(data.b == 3);
-
- data.a += 1;
- try expect(data.a == 2);
- try expect(data.b == 3);
-}
-
-fn getA(data: *const BitField1) u3 {
- return data.a;
-}
-
-fn getB(data: *const BitField1) u3 {
- return data.b;
-}
-
-fn getC(data: *const BitField1) u2 {
- return data.c;
-}
-
-test "default struct initialization fields" {
- const S = struct {
- a: i32 = 1234,
- b: i32,
- };
- const x = S{
- .b = 5,
- };
- var five: i32 = 5;
- const y = S{
- .b = five,
- };
- if (x.a + x.b != 1239) {
- @compileError("it should be comptime known");
- }
- try expect(y.a == x.a);
- try expect(y.b == x.b);
- try expect(1239 == x.a + x.b);
-}
-
-// TODO revisit this test when doing https://github.com/ziglang/zig/issues/1512
-test "packed array 24bits" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- comptime {
- try expect(@sizeOf([9]Foo32Bits) == 9 * 4);
- try expect(@sizeOf(FooArray24Bits) == 2 + 2 * 4 + 2);
- }
-
- var bytes = [_]u8{0} ** (@sizeOf(FooArray24Bits) + 1);
- bytes[bytes.len - 1] = 0xaa;
- const ptr = &std.mem.bytesAsSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
- try expect(ptr.a == 0);
- try expect(ptr.b[0].field == 0);
- try expect(ptr.b[1].field == 0);
- try expect(ptr.c == 0);
-
- ptr.a = maxInt(u16);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == 0);
- try expect(ptr.b[1].field == 0);
- try expect(ptr.c == 0);
-
- ptr.b[0].field = maxInt(u24);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == maxInt(u24));
- try expect(ptr.b[1].field == 0);
- try expect(ptr.c == 0);
-
- ptr.b[1].field = maxInt(u24);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == maxInt(u24));
- try expect(ptr.b[1].field == maxInt(u24));
- try expect(ptr.c == 0);
-
- ptr.c = maxInt(u16);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == maxInt(u24));
- try expect(ptr.b[1].field == maxInt(u24));
- try expect(ptr.c == maxInt(u16));
-
- try expect(bytes[bytes.len - 1] == 0xaa);
-}
-
-const Foo32Bits = packed struct {
- field: u24,
- pad: u8,
-};
-
-const FooArray24Bits = packed struct {
- a: u16,
- b: [2]Foo32Bits,
- c: u16,
-};
-
-test "aligned array of packed struct" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- comptime {
- try expect(@sizeOf(FooStructAligned) == 2);
- try expect(@sizeOf(FooArrayOfAligned) == 2 * 2);
- }
-
- var bytes = [_]u8{0xbb} ** @sizeOf(FooArrayOfAligned);
- const ptr = &std.mem.bytesAsSlice(FooArrayOfAligned, bytes[0..])[0];
-
- try expect(ptr.a[0].a == 0xbb);
- try expect(ptr.a[0].b == 0xbb);
- try expect(ptr.a[1].a == 0xbb);
- try expect(ptr.a[1].b == 0xbb);
-}
-
-const FooStructAligned = packed struct {
- a: u8,
- b: u8,
-};
-
-const FooArrayOfAligned = packed struct {
- a: [2]FooStructAligned,
-};
-
-test "pointer to packed struct member in a stack variable" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = packed struct {
- a: u2,
- b: u2,
- };
-
- var s = S{ .a = 2, .b = 0 };
- var b_ptr = &s.b;
- try expect(s.b == 0);
- b_ptr.* = 2;
- try expect(s.b == 2);
-}
-
-test "non-byte-aligned array inside packed struct" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const Foo = packed struct {
- a: bool,
- b: [0x16]u8,
- };
- const S = struct {
- fn bar(slice: []const u8) !void {
- try expectEqualSlices(u8, slice, "abcdefghijklmnopqurstu");
- }
- fn doTheTest() !void {
- var foo = Foo{
- .a = true,
- .b = "abcdefghijklmnopqurstu".*,
- };
- const value = foo.b;
- try bar(&value);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "packed struct with u0 field access" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = packed struct {
- f0: u0,
- };
- var s = S{ .f0 = 0 };
- comptime try expect(s.f0 == 0);
-}
-
-test "access to global struct fields" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- g_foo.bar.value = 42;
- try expect(g_foo.bar.value == 42);
-}
-
-const S0 = struct {
- bar: S1,
-
- pub const S1 = struct {
- value: u8,
- };
-
- fn init() @This() {
- return S0{ .bar = S1{ .value = 123 } };
- }
-};
-
-var g_foo: S0 = S0.init();
-
-test "packed struct with fp fields" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = packed struct {
- data: [3]f32,
-
- pub fn frob(self: *@This()) void {
- self.data[0] += self.data[1] + self.data[2];
- self.data[1] += self.data[0] + self.data[2];
- self.data[2] += self.data[0] + self.data[1];
- }
- };
-
- var s: S = undefined;
- s.data[0] = 1.0;
- s.data[1] = 2.0;
- s.data[2] = 3.0;
- s.frob();
- try expectEqual(@as(f32, 6.0), s.data[0]);
- try expectEqual(@as(f32, 11.0), s.data[1]);
- try expectEqual(@as(f32, 20.0), s.data[2]);
-}
-
-test "fn with C calling convention returns struct by value" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- fn entry() !void {
- var x = makeBar(10);
- try expectEqual(@as(i32, 10), x.handle);
- }
-
- const ExternBar = extern struct {
- handle: i32,
- };
-
- fn makeBar(t: i32) callconv(.C) ExternBar {
- return ExternBar{
- .handle = t,
- };
- }
- };
- try S.entry();
- comptime try S.entry();
-}
-
-test "non-packed struct with u128 entry in union" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const U = union(enum) {
- Num: u128,
- Void,
- };
-
- const S = struct {
- f1: U,
- f2: U,
- };
-
- var sx: S = undefined;
- var s = &sx;
- try std.testing.expect(@ptrToInt(&s.f2) - @ptrToInt(&s.f1) == @offsetOf(S, "f2"));
- var v2 = U{ .Num = 123 };
- s.f2 = v2;
- try std.testing.expect(s.f2.Num == 123);
-}
-
-test "packed struct field passed to generic function" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- const P = packed struct {
- b: u5,
- g: u5,
- r: u5,
- a: u1,
- };
-
- fn genericReadPackedField(ptr: anytype) u5 {
- return ptr.*;
- }
- };
-
- var p: S.P = undefined;
- p.b = 29;
- var loaded = S.genericReadPackedField(&p.b);
- try expect(loaded == 29);
-}
-
-test "anonymous struct literal syntax" {
- const S = struct {
- const Point = struct {
- x: i32,
- y: i32,
- };
-
- fn doTheTest() !void {
- var p: Point = .{
- .x = 1,
- .y = 2,
- };
- try expect(p.x == 1);
- try expect(p.y == 2);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "fully anonymous struct" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- fn doTheTest() !void {
- try dump(.{
- .int = @as(u32, 1234),
- .float = @as(f64, 12.34),
- .b = true,
- .s = "hi",
- });
- }
- fn dump(args: anytype) !void {
- try expect(args.int == 1234);
- try expect(args.float == 12.34);
- try expect(args.b);
- try expect(args.s[0] == 'h');
- try expect(args.s[1] == 'i');
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "fully anonymous list literal" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- fn doTheTest() !void {
- try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" });
- }
- fn dump(args: anytype) !void {
- try expect(args.@"0" == 1234);
- try expect(args.@"1" == 12.34);
- try expect(args.@"2");
- try expect(args.@"3"[0] == 'h');
- try expect(args.@"3"[1] == 'i');
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "anonymous struct literal assigned to variable" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- var vec = .{ @as(i32, 22), @as(i32, 55), @as(i32, 99) };
- try expect(vec.@"0" == 22);
- try expect(vec.@"1" == 55);
- try expect(vec.@"2" == 99);
-}
-
-test "comptime struct field" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const T = struct {
- a: i32,
- comptime b: i32 = 1234,
- };
-
- var foo: T = undefined;
- comptime try expect(foo.b == 1234);
-}
-
-test "anon struct literal field value initialized with fn call" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- fn doTheTest() !void {
- var x = .{foo()};
- try expectEqualSlices(u8, x[0], "hi");
- }
- fn foo() []const u8 {
- return "hi";
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "struct with union field" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const Value = struct {
- ref: u32 = 2,
- kind: union(enum) {
- None: usize,
- Bool: bool,
- },
- };
-
- var True = Value{
- .kind = .{ .Bool = true },
- };
- try expectEqual(@as(u32, 2), True.ref);
- try expectEqual(true, True.kind.Bool);
-}
-
-test "type coercion of anon struct literal to struct" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- const S2 = struct {
- A: u32,
- B: []const u8,
- C: void,
- D: Foo = .{},
- };
-
- const Foo = struct {
- field: i32 = 1234,
- };
-
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = .{ .A = 123, .B = "foo", .C = {} };
- const t1 = .{ .A = y, .B = "foo", .C = {} };
- const y0: S2 = t0;
- var y1: S2 = t1;
- try expect(y0.A == 123);
- try expect(std.mem.eql(u8, y0.B, "foo"));
- try expect(y0.C == {});
- try expect(y0.D.field == 1234);
- try expect(y1.A == y);
- try expect(std.mem.eql(u8, y1.B, "foo"));
- try expect(y1.C == {});
- try expect(y1.D.field == 1234);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "type coercion of pointer to anon struct literal to pointer to struct" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- const S2 = struct {
- A: u32,
- B: []const u8,
- C: void,
- D: Foo = .{},
- };
-
- const Foo = struct {
- field: i32 = 1234,
- };
-
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = &.{ .A = 123, .B = "foo", .C = {} };
- const t1 = &.{ .A = y, .B = "foo", .C = {} };
- const y0: *const S2 = t0;
- var y1: *const S2 = t1;
- try expect(y0.A == 123);
- try expect(std.mem.eql(u8, y0.B, "foo"));
- try expect(y0.C == {});
- try expect(y0.D.field == 1234);
- try expect(y1.A == y);
- try expect(std.mem.eql(u8, y1.B, "foo"));
- try expect(y1.C == {});
- try expect(y1.D.field == 1234);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "packed struct with undefined initializers" {
- const S = struct {
- const P = packed struct {
- a: u3,
- _a: u3 = undefined,
- b: u3,
- _b: u3 = undefined,
- c: u3,
- _c: u3 = undefined,
- };
-
- fn doTheTest() !void {
- var p: P = undefined;
- p = P{ .a = 2, .b = 4, .c = 6 };
- // Make sure the compiler doesn't touch the unprefixed fields.
- // Use expect since i386-linux doesn't like expectEqual
- try expect(p.a == 2);
- try expect(p.b == 4);
- try expect(p.c == 6);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "for loop over pointers to struct, getting field from struct pointer" {
- // When enabling this test, be careful. I have observed it to pass when compiling
- // stage2 alone, but when using stage1 with -fno-stage1 -fLLVM it fails.
- // Maybe eyeball the LLVM that it generates and run in valgrind, both the compiler
- // and the generated test at runtime.
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
- const S = struct {
- const Foo = struct {
- name: []const u8,
- };
-
- var ok = true;
-
- fn eql(a: []const u8) bool {
- _ = a;
- return true;
- }
-
- const ArrayList = struct {
- fn toSlice(self: *ArrayList) []*Foo {
- _ = self;
- return @as([*]*Foo, undefined)[0..0];
- }
- };
-
- fn doTheTest() !void {
- var objects: ArrayList = undefined;
-
- for (objects.toSlice()) |obj| {
- if (eql(obj.name)) {
- ok = false;
- }
- }
-
- try expect(ok);
- }
- };
- try S.doTheTest();
-}
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index ae3e11c929..001ba538b2 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -76,3 +76,16 @@ test "truncate on comptime integer" {
var w = @truncate(u1, 1 << 100);
try expect(w == 0);
}
+
+test "truncate on vectors" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest;
+
+ const S = struct {
+ fn doTheTest() !void {
+ var v1: @Vector(4, u16) = .{ 0xaabb, 0xccdd, 0xeeff, 0x1122 };
+ var v2 = @truncate(u8, v1);
+ try expect(std.mem.eql(u8, &@as([4]u8, v2), &[4]u8{ 0xbb, 0xdd, 0xff, 0x22 }));
+ }
+ };
+ try S.doTheTest();
+}
diff --git a/test/behavior/truncate_stage1.zig b/test/behavior/truncate_stage1.zig
deleted file mode 100644
index 5c66085cbb..0000000000
--- a/test/behavior/truncate_stage1.zig
+++ /dev/null
@@ -1,13 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-
-test "truncate on vectors" {
- const S = struct {
- fn doTheTest() !void {
- var v1: @Vector(4, u16) = .{ 0xaabb, 0xccdd, 0xeeff, 0x1122 };
- var v2 = @truncate(u8, v1);
- try expect(std.mem.eql(u8, &@as([4]u8, v2), &[4]u8{ 0xbb, 0xdd, 0xff, 0x22 }));
- }
- };
- try S.doTheTest();
-}
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index cb72f86b8b..60d4463873 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -102,3 +102,389 @@ test "Type.Pointer" {
[*c]align(8) volatile u8, [*c]align(8) const volatile u8,
});
}
+
+test "Type.Float" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try testing.expect(f16 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 16 } }));
+ try testing.expect(f32 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 32 } }));
+ try testing.expect(f64 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 64 } }));
+ try testing.expect(f80 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 80 } }));
+ try testing.expect(f128 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 128 } }));
+ try testTypes(&[_]type{ f16, f32, f64, f80, f128 });
+}
+
+test "Type.Array" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try testing.expect([123]u8 == @Type(TypeInfo{
+ .Array = TypeInfo.Array{
+ .len = 123,
+ .child = u8,
+ .sentinel = null,
+ },
+ }));
+ try testing.expect([2]u32 == @Type(TypeInfo{
+ .Array = TypeInfo.Array{
+ .len = 2,
+ .child = u32,
+ .sentinel = null,
+ },
+ }));
+ try testing.expect([2:0]u32 == @Type(TypeInfo{
+ .Array = TypeInfo.Array{
+ .len = 2,
+ .child = u32,
+ .sentinel = &@as(u32, 0),
+ },
+ }));
+ try testTypes(&[_]type{ [1]u8, [30]usize, [7]bool });
+}
+
+test "@Type create slice with null sentinel" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Slice = @Type(TypeInfo{
+ .Pointer = .{
+ .size = .Slice,
+ .is_const = true,
+ .is_volatile = false,
+ .is_allowzero = false,
+ .alignment = 8,
+ .address_space = .generic,
+ .child = *i32,
+ .sentinel = null,
+ },
+ });
+ try testing.expect(Slice == []align(8) const *i32);
+}
+
+test "@Type picks up the sentinel value from TypeInfo" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try testTypes(&[_]type{
+ [11:0]u8, [4:10]u8,
+ [*:0]u8, [*:0]const u8,
+ [*:0]volatile u8, [*:0]const volatile u8,
+ [*:0]align(4) u8, [*:0]align(4) const u8,
+ [*:0]align(4) volatile u8, [*:0]align(4) const volatile u8,
+ [*:0]align(8) u8, [*:0]align(8) const u8,
+ [*:0]align(8) volatile u8, [*:0]align(8) const volatile u8,
+ [*:0]allowzero u8, [*:0]allowzero const u8,
+ [*:0]allowzero volatile u8, [*:0]allowzero const volatile u8,
+ [*:0]allowzero align(4) u8, [*:0]allowzero align(4) const u8,
+ [*:0]allowzero align(4) volatile u8, [*:0]allowzero align(4) const volatile u8,
+ [*:5]allowzero align(4) volatile u8, [*:5]allowzero align(4) const volatile u8,
+ [:0]u8, [:0]const u8,
+ [:0]volatile u8, [:0]const volatile u8,
+ [:0]align(4) u8, [:0]align(4) const u8,
+ [:0]align(4) volatile u8, [:0]align(4) const volatile u8,
+ [:0]align(8) u8, [:0]align(8) const u8,
+ [:0]align(8) volatile u8, [:0]align(8) const volatile u8,
+ [:0]allowzero u8, [:0]allowzero const u8,
+ [:0]allowzero volatile u8, [:0]allowzero const volatile u8,
+ [:0]allowzero align(4) u8, [:0]allowzero align(4) const u8,
+ [:0]allowzero align(4) volatile u8, [:0]allowzero align(4) const volatile u8,
+ [:4]allowzero align(4) volatile u8, [:4]allowzero align(4) const volatile u8,
+ });
+}
+
+test "Type.Optional" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try testTypes(&[_]type{
+ ?u8,
+ ?*u8,
+ ?[]u8,
+ ?[*]u8,
+ ?[*c]u8,
+ });
+}
+
+test "Type.ErrorUnion" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try testTypes(&[_]type{
+ error{}!void,
+ error{Error}!void,
+ });
+}
+
+test "Type.Opaque" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Opaque = @Type(.{
+ .Opaque = .{
+ .decls = &[_]TypeInfo.Declaration{},
+ },
+ });
+ try testing.expect(Opaque != opaque {});
+ try testing.expectEqualSlices(
+ TypeInfo.Declaration,
+ &[_]TypeInfo.Declaration{},
+ @typeInfo(Opaque).Opaque.decls,
+ );
+}
+
+test "Type.Vector" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try testTypes(&[_]type{
+ @Vector(0, u8),
+ @Vector(4, u8),
+ @Vector(8, *u8),
+ std.meta.Vector(0, u8),
+ std.meta.Vector(4, u8),
+ std.meta.Vector(8, *u8),
+ });
+}
+
+test "Type.AnyFrame" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ try testTypes(&[_]type{
+ anyframe,
+ anyframe->u8,
+ anyframe->anyframe->u8,
+ });
+}
+
+fn add(a: i32, b: i32) i32 {
+ return a + b;
+}
+
+test "Type.ErrorSet" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ // error sets don't compare equal so just check if they compile
+ _ = @Type(@typeInfo(error{}));
+ _ = @Type(@typeInfo(error{A}));
+ _ = @Type(@typeInfo(error{ A, B, C }));
+}
+
+test "Type.Struct" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const A = @Type(@typeInfo(struct { x: u8, y: u32 }));
+ const infoA = @typeInfo(A).Struct;
+ try testing.expectEqual(TypeInfo.ContainerLayout.Auto, infoA.layout);
+ try testing.expectEqualSlices(u8, "x", infoA.fields[0].name);
+ try testing.expectEqual(u8, infoA.fields[0].field_type);
+ try testing.expectEqual(@as(?*const anyopaque, null), infoA.fields[0].default_value);
+ try testing.expectEqualSlices(u8, "y", infoA.fields[1].name);
+ try testing.expectEqual(u32, infoA.fields[1].field_type);
+ try testing.expectEqual(@as(?*const anyopaque, null), infoA.fields[1].default_value);
+ try testing.expectEqualSlices(TypeInfo.Declaration, &[_]TypeInfo.Declaration{}, infoA.decls);
+ try testing.expectEqual(@as(bool, false), infoA.is_tuple);
+
+ var a = A{ .x = 0, .y = 1 };
+ try testing.expectEqual(@as(u8, 0), a.x);
+ try testing.expectEqual(@as(u32, 1), a.y);
+ a.y += 1;
+ try testing.expectEqual(@as(u32, 2), a.y);
+
+ const B = @Type(@typeInfo(extern struct { x: u8, y: u32 = 5 }));
+ const infoB = @typeInfo(B).Struct;
+ try testing.expectEqual(TypeInfo.ContainerLayout.Extern, infoB.layout);
+ try testing.expectEqualSlices(u8, "x", infoB.fields[0].name);
+ try testing.expectEqual(u8, infoB.fields[0].field_type);
+ try testing.expectEqual(@as(?*const anyopaque, null), infoB.fields[0].default_value);
+ try testing.expectEqualSlices(u8, "y", infoB.fields[1].name);
+ try testing.expectEqual(u32, infoB.fields[1].field_type);
+ try testing.expectEqual(@as(u32, 5), @ptrCast(*const u32, infoB.fields[1].default_value.?).*);
+ try testing.expectEqual(@as(usize, 0), infoB.decls.len);
+ try testing.expectEqual(@as(bool, false), infoB.is_tuple);
+
+ const C = @Type(@typeInfo(packed struct { x: u8 = 3, y: u32 = 5 }));
+ const infoC = @typeInfo(C).Struct;
+ try testing.expectEqual(TypeInfo.ContainerLayout.Packed, infoC.layout);
+ try testing.expectEqualSlices(u8, "x", infoC.fields[0].name);
+ try testing.expectEqual(u8, infoC.fields[0].field_type);
+ try testing.expectEqual(@as(u8, 3), @ptrCast(*const u8, infoC.fields[0].default_value.?).*);
+ try testing.expectEqualSlices(u8, "y", infoC.fields[1].name);
+ try testing.expectEqual(u32, infoC.fields[1].field_type);
+ try testing.expectEqual(@as(u32, 5), @ptrCast(*const u32, infoC.fields[1].default_value.?).*);
+ try testing.expectEqual(@as(usize, 0), infoC.decls.len);
+ try testing.expectEqual(@as(bool, false), infoC.is_tuple);
+}
+
+test "Type.Enum" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Foo = @Type(.{
+ .Enum = .{
+ .layout = .Auto,
+ .tag_type = u8,
+ .fields = &[_]TypeInfo.EnumField{
+ .{ .name = "a", .value = 1 },
+ .{ .name = "b", .value = 5 },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ .is_exhaustive = true,
+ },
+ });
+ try testing.expectEqual(true, @typeInfo(Foo).Enum.is_exhaustive);
+ try testing.expectEqual(@as(u8, 1), @enumToInt(Foo.a));
+ try testing.expectEqual(@as(u8, 5), @enumToInt(Foo.b));
+ const Bar = @Type(.{
+ .Enum = .{
+ .layout = .Extern,
+ .tag_type = u32,
+ .fields = &[_]TypeInfo.EnumField{
+ .{ .name = "a", .value = 1 },
+ .{ .name = "b", .value = 5 },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ .is_exhaustive = false,
+ },
+ });
+ try testing.expectEqual(false, @typeInfo(Bar).Enum.is_exhaustive);
+ try testing.expectEqual(@as(u32, 1), @enumToInt(Bar.a));
+ try testing.expectEqual(@as(u32, 5), @enumToInt(Bar.b));
+ try testing.expectEqual(@as(u32, 6), @enumToInt(@intToEnum(Bar, 6)));
+}
+
+test "Type.Union" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Untagged = @Type(.{
+ .Union = .{
+ .layout = .Auto,
+ .tag_type = null,
+ .fields = &[_]TypeInfo.UnionField{
+ .{ .name = "int", .field_type = i32, .alignment = @alignOf(f32) },
+ .{ .name = "float", .field_type = f32, .alignment = @alignOf(f32) },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ },
+ });
+ var untagged = Untagged{ .int = 1 };
+ untagged.float = 2.0;
+ untagged.int = 3;
+ try testing.expectEqual(@as(i32, 3), untagged.int);
+
+ const PackedUntagged = @Type(.{
+ .Union = .{
+ .layout = .Packed,
+ .tag_type = null,
+ .fields = &[_]TypeInfo.UnionField{
+ .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
+ .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ },
+ });
+ var packed_untagged = PackedUntagged{ .signed = -1 };
+ try testing.expectEqual(@as(i32, -1), packed_untagged.signed);
+ try testing.expectEqual(~@as(u32, 0), packed_untagged.unsigned);
+
+ const Tag = @Type(.{
+ .Enum = .{
+ .layout = .Auto,
+ .tag_type = u1,
+ .fields = &[_]TypeInfo.EnumField{
+ .{ .name = "signed", .value = 0 },
+ .{ .name = "unsigned", .value = 1 },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ .is_exhaustive = true,
+ },
+ });
+ const Tagged = @Type(.{
+ .Union = .{
+ .layout = .Auto,
+ .tag_type = Tag,
+ .fields = &[_]TypeInfo.UnionField{
+ .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
+ .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ },
+ });
+ var tagged = Tagged{ .signed = -1 };
+ try testing.expectEqual(Tag.signed, tagged);
+ tagged = .{ .unsigned = 1 };
+ try testing.expectEqual(Tag.unsigned, tagged);
+}
+
+test "Type.Union from Type.Enum" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const Tag = @Type(.{
+ .Enum = .{
+ .layout = .Auto,
+ .tag_type = u0,
+ .fields = &[_]TypeInfo.EnumField{
+ .{ .name = "working_as_expected", .value = 0 },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ .is_exhaustive = true,
+ },
+ });
+ const T = @Type(.{
+ .Union = .{
+ .layout = .Auto,
+ .tag_type = Tag,
+ .fields = &[_]TypeInfo.UnionField{
+ .{ .name = "working_as_expected", .field_type = u32, .alignment = @alignOf(u32) },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ },
+ });
+ _ = T;
+ _ = @typeInfo(T).Union;
+}
+
+test "Type.Union from regular enum" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ const E = enum { working_as_expected };
+ const T = @Type(.{
+ .Union = .{
+ .layout = .Auto,
+ .tag_type = E,
+ .fields = &[_]TypeInfo.UnionField{
+ .{ .name = "working_as_expected", .field_type = u32, .alignment = @alignOf(u32) },
+ },
+ .decls = &[_]TypeInfo.Declaration{},
+ },
+ });
+ _ = T;
+ _ = @typeInfo(T).Union;
+}
+
+test "Type.Fn" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ // wasm doesn't support align attributes on functions
+ if (builtin.target.cpu.arch == .wasm32 or builtin.target.cpu.arch == .wasm64) return error.SkipZigTest;
+
+ const foo = struct {
+ fn func(a: usize, b: bool) align(4) callconv(.C) usize {
+ _ = a;
+ _ = b;
+ return 0;
+ }
+ }.func;
+ const Foo = @Type(@typeInfo(@TypeOf(foo)));
+ const foo_2: Foo = foo;
+ _ = foo_2;
+}
+
+test "Type.BoundFn" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
+ // wasm doesn't support align attributes on functions
+ if (builtin.target.cpu.arch == .wasm32 or builtin.target.cpu.arch == .wasm64) return error.SkipZigTest;
+
+ const TestStruct = packed struct {
+ pub fn foo(self: *const @This()) align(4) callconv(.Unspecified) void {
+ _ = self;
+ }
+ };
+ const test_instance: TestStruct = undefined;
+ try testing.expect(std.meta.eql(
+ @typeName(@TypeOf(test_instance.foo)),
+ @typeName(@Type(@typeInfo(@TypeOf(test_instance.foo)))),
+ ));
+}
diff --git a/test/behavior/type_stage1.zig b/test/behavior/type_stage1.zig
deleted file mode 100644
index 852c9fc82d..0000000000
--- a/test/behavior/type_stage1.zig
+++ /dev/null
@@ -1,362 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const TypeInfo = std.builtin.TypeInfo;
-const testing = std.testing;
-
-fn testTypes(comptime types: []const type) !void {
- inline for (types) |testType| {
- try testing.expect(testType == @Type(@typeInfo(testType)));
- }
-}
-
-test "Type.Float" {
- try testing.expect(f16 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 16 } }));
- try testing.expect(f32 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 32 } }));
- try testing.expect(f64 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 64 } }));
- try testing.expect(f80 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 80 } }));
- try testing.expect(f128 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 128 } }));
- try testTypes(&[_]type{ f16, f32, f64, f80, f128 });
-}
-
-test "Type.Array" {
- try testing.expect([123]u8 == @Type(TypeInfo{
- .Array = TypeInfo.Array{
- .len = 123,
- .child = u8,
- .sentinel = null,
- },
- }));
- try testing.expect([2]u32 == @Type(TypeInfo{
- .Array = TypeInfo.Array{
- .len = 2,
- .child = u32,
- .sentinel = null,
- },
- }));
- try testing.expect([2:0]u32 == @Type(TypeInfo{
- .Array = TypeInfo.Array{
- .len = 2,
- .child = u32,
- .sentinel = &@as(u32, 0),
- },
- }));
- try testTypes(&[_]type{ [1]u8, [30]usize, [7]bool });
-}
-
-test "@Type create slice with null sentinel" {
- const Slice = @Type(TypeInfo{
- .Pointer = .{
- .size = .Slice,
- .is_const = true,
- .is_volatile = false,
- .is_allowzero = false,
- .alignment = 8,
- .address_space = .generic,
- .child = *i32,
- .sentinel = null,
- },
- });
- try testing.expect(Slice == []align(8) const *i32);
-}
-
-test "@Type picks up the sentinel value from TypeInfo" {
- try testTypes(&[_]type{
- [11:0]u8, [4:10]u8,
- [*:0]u8, [*:0]const u8,
- [*:0]volatile u8, [*:0]const volatile u8,
- [*:0]align(4) u8, [*:0]align(4) const u8,
- [*:0]align(4) volatile u8, [*:0]align(4) const volatile u8,
- [*:0]align(8) u8, [*:0]align(8) const u8,
- [*:0]align(8) volatile u8, [*:0]align(8) const volatile u8,
- [*:0]allowzero u8, [*:0]allowzero const u8,
- [*:0]allowzero volatile u8, [*:0]allowzero const volatile u8,
- [*:0]allowzero align(4) u8, [*:0]allowzero align(4) const u8,
- [*:0]allowzero align(4) volatile u8, [*:0]allowzero align(4) const volatile u8,
- [*:5]allowzero align(4) volatile u8, [*:5]allowzero align(4) const volatile u8,
- [:0]u8, [:0]const u8,
- [:0]volatile u8, [:0]const volatile u8,
- [:0]align(4) u8, [:0]align(4) const u8,
- [:0]align(4) volatile u8, [:0]align(4) const volatile u8,
- [:0]align(8) u8, [:0]align(8) const u8,
- [:0]align(8) volatile u8, [:0]align(8) const volatile u8,
- [:0]allowzero u8, [:0]allowzero const u8,
- [:0]allowzero volatile u8, [:0]allowzero const volatile u8,
- [:0]allowzero align(4) u8, [:0]allowzero align(4) const u8,
- [:0]allowzero align(4) volatile u8, [:0]allowzero align(4) const volatile u8,
- [:4]allowzero align(4) volatile u8, [:4]allowzero align(4) const volatile u8,
- });
-}
-
-test "Type.Optional" {
- try testTypes(&[_]type{
- ?u8,
- ?*u8,
- ?[]u8,
- ?[*]u8,
- ?[*c]u8,
- });
-}
-
-test "Type.ErrorUnion" {
- try testTypes(&[_]type{
- error{}!void,
- error{Error}!void,
- });
-}
-
-test "Type.Opaque" {
- const Opaque = @Type(.{
- .Opaque = .{
- .decls = &[_]TypeInfo.Declaration{},
- },
- });
- try testing.expect(Opaque != opaque {});
- try testing.expectEqualSlices(
- TypeInfo.Declaration,
- &[_]TypeInfo.Declaration{},
- @typeInfo(Opaque).Opaque.decls,
- );
-}
-
-test "Type.Vector" {
- try testTypes(&[_]type{
- @Vector(0, u8),
- @Vector(4, u8),
- @Vector(8, *u8),
- std.meta.Vector(0, u8),
- std.meta.Vector(4, u8),
- std.meta.Vector(8, *u8),
- });
-}
-
-test "Type.AnyFrame" {
- try testTypes(&[_]type{
- anyframe,
- anyframe->u8,
- anyframe->anyframe->u8,
- });
-}
-
-fn add(a: i32, b: i32) i32 {
- return a + b;
-}
-
-test "Type.ErrorSet" {
- // error sets don't compare equal so just check if they compile
- _ = @Type(@typeInfo(error{}));
- _ = @Type(@typeInfo(error{A}));
- _ = @Type(@typeInfo(error{ A, B, C }));
-}
-
-test "Type.Struct" {
- const A = @Type(@typeInfo(struct { x: u8, y: u32 }));
- const infoA = @typeInfo(A).Struct;
- try testing.expectEqual(TypeInfo.ContainerLayout.Auto, infoA.layout);
- try testing.expectEqualSlices(u8, "x", infoA.fields[0].name);
- try testing.expectEqual(u8, infoA.fields[0].field_type);
- try testing.expectEqual(@as(?*const anyopaque, null), infoA.fields[0].default_value);
- try testing.expectEqualSlices(u8, "y", infoA.fields[1].name);
- try testing.expectEqual(u32, infoA.fields[1].field_type);
- try testing.expectEqual(@as(?*const anyopaque, null), infoA.fields[1].default_value);
- try testing.expectEqualSlices(TypeInfo.Declaration, &[_]TypeInfo.Declaration{}, infoA.decls);
- try testing.expectEqual(@as(bool, false), infoA.is_tuple);
-
- var a = A{ .x = 0, .y = 1 };
- try testing.expectEqual(@as(u8, 0), a.x);
- try testing.expectEqual(@as(u32, 1), a.y);
- a.y += 1;
- try testing.expectEqual(@as(u32, 2), a.y);
-
- const B = @Type(@typeInfo(extern struct { x: u8, y: u32 = 5 }));
- const infoB = @typeInfo(B).Struct;
- try testing.expectEqual(TypeInfo.ContainerLayout.Extern, infoB.layout);
- try testing.expectEqualSlices(u8, "x", infoB.fields[0].name);
- try testing.expectEqual(u8, infoB.fields[0].field_type);
- try testing.expectEqual(@as(?*const anyopaque, null), infoB.fields[0].default_value);
- try testing.expectEqualSlices(u8, "y", infoB.fields[1].name);
- try testing.expectEqual(u32, infoB.fields[1].field_type);
- try testing.expectEqual(@as(u32, 5), @ptrCast(*const u32, infoB.fields[1].default_value.?).*);
- try testing.expectEqual(@as(usize, 0), infoB.decls.len);
- try testing.expectEqual(@as(bool, false), infoB.is_tuple);
-
- const C = @Type(@typeInfo(packed struct { x: u8 = 3, y: u32 = 5 }));
- const infoC = @typeInfo(C).Struct;
- try testing.expectEqual(TypeInfo.ContainerLayout.Packed, infoC.layout);
- try testing.expectEqualSlices(u8, "x", infoC.fields[0].name);
- try testing.expectEqual(u8, infoC.fields[0].field_type);
- try testing.expectEqual(@as(u8, 3), @ptrCast(*const u8, infoC.fields[0].default_value.?).*);
- try testing.expectEqualSlices(u8, "y", infoC.fields[1].name);
- try testing.expectEqual(u32, infoC.fields[1].field_type);
- try testing.expectEqual(@as(u32, 5), @ptrCast(*const u32, infoC.fields[1].default_value.?).*);
- try testing.expectEqual(@as(usize, 0), infoC.decls.len);
- try testing.expectEqual(@as(bool, false), infoC.is_tuple);
-}
-
-test "Type.Enum" {
- const Foo = @Type(.{
- .Enum = .{
- .layout = .Auto,
- .tag_type = u8,
- .fields = &[_]TypeInfo.EnumField{
- .{ .name = "a", .value = 1 },
- .{ .name = "b", .value = 5 },
- },
- .decls = &[_]TypeInfo.Declaration{},
- .is_exhaustive = true,
- },
- });
- try testing.expectEqual(true, @typeInfo(Foo).Enum.is_exhaustive);
- try testing.expectEqual(@as(u8, 1), @enumToInt(Foo.a));
- try testing.expectEqual(@as(u8, 5), @enumToInt(Foo.b));
- const Bar = @Type(.{
- .Enum = .{
- .layout = .Extern,
- .tag_type = u32,
- .fields = &[_]TypeInfo.EnumField{
- .{ .name = "a", .value = 1 },
- .{ .name = "b", .value = 5 },
- },
- .decls = &[_]TypeInfo.Declaration{},
- .is_exhaustive = false,
- },
- });
- try testing.expectEqual(false, @typeInfo(Bar).Enum.is_exhaustive);
- try testing.expectEqual(@as(u32, 1), @enumToInt(Bar.a));
- try testing.expectEqual(@as(u32, 5), @enumToInt(Bar.b));
- try testing.expectEqual(@as(u32, 6), @enumToInt(@intToEnum(Bar, 6)));
-}
-
-test "Type.Union" {
- const Untagged = @Type(.{
- .Union = .{
- .layout = .Auto,
- .tag_type = null,
- .fields = &[_]TypeInfo.UnionField{
- .{ .name = "int", .field_type = i32, .alignment = @alignOf(f32) },
- .{ .name = "float", .field_type = f32, .alignment = @alignOf(f32) },
- },
- .decls = &[_]TypeInfo.Declaration{},
- },
- });
- var untagged = Untagged{ .int = 1 };
- untagged.float = 2.0;
- untagged.int = 3;
- try testing.expectEqual(@as(i32, 3), untagged.int);
-
- const PackedUntagged = @Type(.{
- .Union = .{
- .layout = .Packed,
- .tag_type = null,
- .fields = &[_]TypeInfo.UnionField{
- .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
- .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
- },
- .decls = &[_]TypeInfo.Declaration{},
- },
- });
- var packed_untagged = PackedUntagged{ .signed = -1 };
- try testing.expectEqual(@as(i32, -1), packed_untagged.signed);
- try testing.expectEqual(~@as(u32, 0), packed_untagged.unsigned);
-
- const Tag = @Type(.{
- .Enum = .{
- .layout = .Auto,
- .tag_type = u1,
- .fields = &[_]TypeInfo.EnumField{
- .{ .name = "signed", .value = 0 },
- .{ .name = "unsigned", .value = 1 },
- },
- .decls = &[_]TypeInfo.Declaration{},
- .is_exhaustive = true,
- },
- });
- const Tagged = @Type(.{
- .Union = .{
- .layout = .Auto,
- .tag_type = Tag,
- .fields = &[_]TypeInfo.UnionField{
- .{ .name = "signed", .field_type = i32, .alignment = @alignOf(i32) },
- .{ .name = "unsigned", .field_type = u32, .alignment = @alignOf(u32) },
- },
- .decls = &[_]TypeInfo.Declaration{},
- },
- });
- var tagged = Tagged{ .signed = -1 };
- try testing.expectEqual(Tag.signed, tagged);
- tagged = .{ .unsigned = 1 };
- try testing.expectEqual(Tag.unsigned, tagged);
-}
-
-test "Type.Union from Type.Enum" {
- const Tag = @Type(.{
- .Enum = .{
- .layout = .Auto,
- .tag_type = u0,
- .fields = &[_]TypeInfo.EnumField{
- .{ .name = "working_as_expected", .value = 0 },
- },
- .decls = &[_]TypeInfo.Declaration{},
- .is_exhaustive = true,
- },
- });
- const T = @Type(.{
- .Union = .{
- .layout = .Auto,
- .tag_type = Tag,
- .fields = &[_]TypeInfo.UnionField{
- .{ .name = "working_as_expected", .field_type = u32, .alignment = @alignOf(u32) },
- },
- .decls = &[_]TypeInfo.Declaration{},
- },
- });
- _ = T;
- _ = @typeInfo(T).Union;
-}
-
-test "Type.Union from regular enum" {
- const E = enum { working_as_expected };
- const T = @Type(.{
- .Union = .{
- .layout = .Auto,
- .tag_type = E,
- .fields = &[_]TypeInfo.UnionField{
- .{ .name = "working_as_expected", .field_type = u32, .alignment = @alignOf(u32) },
- },
- .decls = &[_]TypeInfo.Declaration{},
- },
- });
- _ = T;
- _ = @typeInfo(T).Union;
-}
-
-test "Type.Fn" {
- // wasm doesn't support align attributes on functions
- if (builtin.target.cpu.arch == .wasm32 or builtin.target.cpu.arch == .wasm64) return error.SkipZigTest;
-
- const foo = struct {
- fn func(a: usize, b: bool) align(4) callconv(.C) usize {
- _ = a;
- _ = b;
- return 0;
- }
- }.func;
- const Foo = @Type(@typeInfo(@TypeOf(foo)));
- const foo_2: Foo = foo;
- _ = foo_2;
-}
-
-test "Type.BoundFn" {
- // wasm doesn't support align attributes on functions
- if (builtin.target.cpu.arch == .wasm32 or builtin.target.cpu.arch == .wasm64) return error.SkipZigTest;
-
- const TestStruct = packed struct {
- pub fn foo(self: *const @This()) align(4) callconv(.Unspecified) void {
- _ = self;
- }
- };
- const test_instance: TestStruct = undefined;
- try testing.expect(std.meta.eql(
- @typeName(@TypeOf(test_instance.foo)),
- @typeName(@Type(@typeInfo(@TypeOf(test_instance.foo)))),
- ));
-}
From 0b7347fd18eee7dd829cd9aaed3683123d84859b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 12 Feb 2022 21:35:29 -0700
Subject: [PATCH 0154/2031] move more behavior tests to the "passing" section
---
test/behavior.zig | 14 +++++++-------
test/behavior/fn_delegation.zig | 3 +++
test/behavior/ir_block_deps.zig | 4 ++++
test/behavior/reflection.zig | 6 ++++++
test/behavior/tuple.zig | 11 +++++++++++
test/behavior/union.zig | 2 +-
test/behavior/var_args.zig | 19 +++++++++++++++++++
7 files changed, 51 insertions(+), 8 deletions(-)
diff --git a/test/behavior.zig b/test/behavior.zig
index 404ce376a2..6b08465429 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -31,18 +31,23 @@ test {
_ = @import("behavior/bugs/7250.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/comptime_memory.zig");
+ _ = @import("behavior/fn_delegation.zig");
_ = @import("behavior/fn_in_struct_in_comptime.zig");
_ = @import("behavior/hasdecl.zig");
_ = @import("behavior/hasfield.zig");
+ _ = @import("behavior/ir_block_deps.zig");
_ = @import("behavior/namespace_depends_on_compile_var.zig");
_ = @import("behavior/optional.zig");
_ = @import("behavior/prefetch.zig");
_ = @import("behavior/pub_enum.zig");
+ _ = @import("behavior/reflection.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
- _ = @import("behavior/type.zig");
- _ = @import("behavior/truncate.zig");
_ = @import("behavior/struct.zig");
+ _ = @import("behavior/truncate.zig");
+ _ = @import("behavior/tuple.zig");
+ _ = @import("behavior/type.zig");
+ _ = @import("behavior/var_args.zig");
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
@@ -145,21 +150,16 @@ test {
_ = @import("behavior/const_slice_child.zig");
_ = @import("behavior/export_self_referential_type_info.zig");
_ = @import("behavior/field_parent_ptr.zig");
- _ = @import("behavior/fn_delegation.zig");
- _ = @import("behavior/ir_block_deps.zig");
_ = @import("behavior/misc.zig");
_ = @import("behavior/muladd.zig");
- _ = @import("behavior/reflection.zig");
_ = @import("behavior/select.zig");
_ = @import("behavior/shuffle.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
_ = @import("behavior/switch_prong_err_enum.zig");
_ = @import("behavior/switch_prong_implicit_cast.zig");
- _ = @import("behavior/tuple.zig");
_ = @import("behavior/typename.zig");
_ = @import("behavior/union_with_members.zig");
- _ = @import("behavior/var_args.zig");
_ = @import("behavior/vector.zig");
if (builtin.target.cpu.arch == .wasm32) {
_ = @import("behavior/wasm.zig");
diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig
index 72a72c0bdd..25ec3dea1b 100644
--- a/test/behavior/fn_delegation.zig
+++ b/test/behavior/fn_delegation.zig
@@ -1,3 +1,4 @@
+const builtin = @import("builtin");
const expect = @import("std").testing.expect;
const Foo = struct {
@@ -31,6 +32,8 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 {
}
test "fn delegation" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
const foo = Foo{};
try expect(foo.one() == 11);
try expect(foo.two() == 12);
diff --git a/test/behavior/ir_block_deps.zig b/test/behavior/ir_block_deps.zig
index 09c1532bff..cbc5cc2419 100644
--- a/test/behavior/ir_block_deps.zig
+++ b/test/behavior/ir_block_deps.zig
@@ -1,3 +1,4 @@
+const builtin = @import("builtin");
const expect = @import("std").testing.expect;
fn foo(id: u64) !i32 {
@@ -17,6 +18,9 @@ fn getErrInt() anyerror!i32 {
}
test "ir block deps" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
try expect((foo(1) catch unreachable) == 0);
try expect((foo(2) catch unreachable) == 0);
}
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index 18ee9d5c8b..96c81fe0d0 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -1,9 +1,12 @@
+const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const mem = std.mem;
const reflection = @This();
test "reflection: function return type, var args, and param types" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
comptime {
const info = @typeInfo(@TypeOf(dummy)).Fn;
try expect(info.return_type.? == i32);
@@ -25,6 +28,9 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
}
test "reflection: @field" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
var f = Foo{
.one = 42,
.two = true,
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 632e5be013..680de28b76 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -1,9 +1,12 @@
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "tuple concatenation" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
const S = struct {
fn doTheTest() !void {
var a: i32 = 1;
@@ -20,6 +23,8 @@ test "tuple concatenation" {
}
test "tuple multiplication" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
const S = struct {
fn doTheTest() !void {
{
@@ -81,6 +86,8 @@ test "tuple multiplication" {
}
test "pass tuple to comptime var parameter" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
const S = struct {
fn Foo(comptime args: anytype) !void {
try expect(args[0] == 1);
@@ -95,6 +102,8 @@ test "pass tuple to comptime var parameter" {
}
test "tuple initializer for var" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
const S = struct {
fn doTheTest() void {
const Bytes = struct {
@@ -114,6 +123,8 @@ test "tuple initializer for var" {
}
test "array-like initializer for tuple types" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
const T = @Type(std.builtin.TypeInfo{
.Struct = std.builtin.TypeInfo.Struct{
.is_tuple = true,
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index cdd63df44e..1cd5b05eb1 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -362,7 +362,7 @@ pub const FooUnion = union(enum) {
var glbl_array: [2]FooUnion = undefined;
test "initialize global array of union" {
- if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
glbl_array[1] = FooUnion{ .U1 = 2 };
glbl_array[0] = FooUnion{ .U0 = 1 };
diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig
index 40770e1334..63b8c35e1b 100644
--- a/test/behavior/var_args.zig
+++ b/test/behavior/var_args.zig
@@ -1,3 +1,4 @@
+const builtin = @import("builtin");
const expect = @import("std").testing.expect;
fn add(args: anytype) i32 {
@@ -12,6 +13,8 @@ fn add(args: anytype) i32 {
}
test "add arbitrary args" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
try expect(add(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
try expect(add(.{@as(i32, 1234)}) == 1234);
try expect(add(.{}) == 0);
@@ -22,10 +25,16 @@ fn readFirstVarArg(args: anytype) void {
}
test "send void arg to var args" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
readFirstVarArg(.{{}});
}
test "pass args directly" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
try expect(addSomeStuff(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
try expect(addSomeStuff(.{@as(i32, 1234)}) == 1234);
try expect(addSomeStuff(.{}) == 0);
@@ -36,6 +45,8 @@ fn addSomeStuff(args: anytype) i32 {
}
test "runtime parameter before var args" {
+ if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+
try expect((try extraFn(10, .{})) == 0);
try expect((try extraFn(10, .{false})) == 1);
try expect((try extraFn(10, .{ false, true })) == 2);
@@ -73,11 +84,19 @@ fn foo2(args: anytype) bool {
}
test "array of var args functions" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
try expect(foos[0](.{}));
try expect(!foos[1](.{}));
}
test "pass zero length array to var args param" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
doNothingWithFirstArg(.{""});
}
From ddd6de86f7eb71814d3605d3e0ea9ed01d075613 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sun, 13 Feb 2022 12:34:41 +0200
Subject: [PATCH 0155/2031] parser: make missing semicolon error point to the
end of the previous token
---
lib/std/zig/Ast.zig | 22 ++++++++++++++++++++++
lib/std/zig/parse.zig | 19 ++++++++++++++-----
src/Module.zig | 3 ++-
src/main.zig | 5 +++--
4 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 17da4f5315..16430fe9d4 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -64,6 +64,17 @@ pub fn renderToArrayList(tree: Ast, buffer: *std.ArrayList(u8)) RenderError!void
return @import("./render.zig").renderTree(buffer, tree);
}
+/// Returns an extra offset for column and byte offset of errors that
+/// should point after the token in the error message.
+pub fn errorOffset(tree:Ast, error_tag: Error.Tag, token: TokenIndex) u32 {
+ return switch (error_tag) {
+ .expected_semi_after_decl,
+ .expected_semi_after_stmt,
+ => @intCast(u32, tree.tokenSlice(token).len),
+ else => 0,
+ };
+}
+
pub fn tokenLocation(self: Ast, start_offset: ByteOffset, token_index: TokenIndex) Location {
var loc = Location{
.line = 0,
@@ -306,6 +317,13 @@ pub fn renderError(tree: Ast, parse_error: Error, stream: anytype) !void {
return stream.writeAll("function prototype has parameter after varargs");
},
+ .expected_semi_after_decl => {
+ return stream.writeAll("expected ';' after declaration");
+ },
+ .expected_semi_after_stmt => {
+ return stream.writeAll("expected ';' after statement");
+ },
+
.expected_token => {
const found_tag = token_tags[parse_error.token];
const expected_symbol = parse_error.extra.expected_tag.symbol();
@@ -2495,6 +2513,10 @@ pub const Error = struct {
unattached_doc_comment,
varargs_nonfinal,
+ // these have `token` set to token after which a semicolon was expected
+ expected_semi_after_decl,
+ expected_semi_after_stmt,
+
/// `expected_tag` is populated.
expected_token,
};
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index a70d0309e3..e818046ac5 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -586,7 +586,7 @@ const Parser = struct {
const thread_local_token = p.eatToken(.keyword_threadlocal);
const var_decl = try p.parseVarDecl();
if (var_decl != 0) {
- _ = try p.expectToken(.semicolon);
+ try p.expectSemicolon(.expected_semi_after_decl, false);
return var_decl;
}
if (thread_local_token != null) {
@@ -614,7 +614,7 @@ const Parser = struct {
fn expectUsingNamespace(p: *Parser) !Node.Index {
const usingnamespace_token = p.assertToken(.keyword_usingnamespace);
const expr = try p.expectExpr();
- _ = try p.expectToken(.semicolon);
+ try p.expectSemicolon(.expected_semi_after_decl, false);
return p.addNode(.{
.tag = .@"usingnamespace",
.main_token = usingnamespace_token,
@@ -851,7 +851,7 @@ const Parser = struct {
const var_decl = try p.parseVarDecl();
if (var_decl != 0) {
- _ = try p.expectTokenRecoverable(.semicolon);
+ try p.expectSemicolon(.expected_semi_after_decl, true);
return var_decl;
}
@@ -915,7 +915,7 @@ const Parser = struct {
const assign_expr = try p.parseAssignExpr();
if (assign_expr != 0) {
- _ = try p.expectTokenRecoverable(.semicolon);
+ try p.expectSemicolon(.expected_semi_after_stmt, true);
return assign_expr;
}
@@ -1205,7 +1205,7 @@ const Parser = struct {
}
const assign_expr = try p.parseAssignExpr();
if (assign_expr != 0) {
- _ = try p.expectTokenRecoverable(.semicolon);
+ try p.expectSemicolon(.expected_semi_after_stmt, true);
return assign_expr;
}
return null_node;
@@ -3664,6 +3664,15 @@ const Parser = struct {
}
}
+ fn expectSemicolon(p: *Parser, tag: AstError.Tag, recoverable: bool) Error!void {
+ if (p.token_tags[p.tok_i] == .semicolon) {
+ _ = p.nextToken();
+ return;
+ }
+ try p.warnMsg(.{ .tag = tag, .token = p.tok_i - 1 });
+ if (!recoverable) return error.ParseError;
+ }
+
fn nextToken(p: *Parser) TokenIndex {
const result = p.tok_i;
p.tok_i += 1;
diff --git a/src/Module.zig b/src/Module.zig
index 3631e41f25..2cd01acd59 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -2995,13 +2995,14 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
const token_starts = file.tree.tokens.items(.start);
const token_tags = file.tree.tokens.items(.tag);
+ const extra_offset = file.tree.errorOffset(parse_err.tag, parse_err.token);
try file.tree.renderError(parse_err, msg.writer());
const err_msg = try gpa.create(ErrorMsg);
err_msg.* = .{
.src_loc = .{
.file_scope = file,
.parent_decl_node = 0,
- .lazy = .{ .byte_abs = token_starts[parse_err.token] },
+ .lazy = .{ .byte_abs = token_starts[parse_err.token] + extra_offset },
},
.msg = msg.toOwnedSlice(),
};
diff --git a/src/main.zig b/src/main.zig
index 75655d6a2a..12e9f88088 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4040,13 +4040,14 @@ fn printErrMsgToStdErr(
notes_len += 1;
}
+ const extra_offset = tree.errorOffset(parse_error.tag, parse_error.token);
const message: Compilation.AllErrors.Message = .{
.src = .{
.src_path = path,
.msg = text,
- .byte_offset = @intCast(u32, start_loc.line_start),
+ .byte_offset = @intCast(u32, start_loc.line_start) + extra_offset,
.line = @intCast(u32, start_loc.line),
- .column = @intCast(u32, start_loc.column),
+ .column = @intCast(u32, start_loc.column) + extra_offset,
.source_line = source_line,
.notes = notes_buffer[0..notes_len],
},
From 6456af5a45cc12c0cd28d957dfedc72c369a157e Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sun, 13 Feb 2022 13:18:30 +0200
Subject: [PATCH 0156/2031] parser: make missing comma errors point to the end
of the previous token
---
lib/std/zig/Ast.zig | 35 ++++++++++++++---
lib/std/zig/parse.zig | 88 +++++++++++++++++++++----------------------
2 files changed, 71 insertions(+), 52 deletions(-)
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 16430fe9d4..c12c230c69 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -70,6 +70,13 @@ pub fn errorOffset(tree:Ast, error_tag: Error.Tag, token: TokenIndex) u32 {
return switch (error_tag) {
.expected_semi_after_decl,
.expected_semi_after_stmt,
+ .expected_comma_after_field,
+ .expected_comma_after_arg,
+ .expected_comma_after_param,
+ .expected_comma_after_initializer,
+ .expected_comma_after_switch_prong,
+ .expected_semi_or_else,
+ .expected_semi_or_lbrace,
=> @intCast(u32, tree.tokenSlice(token).len),
else => 0,
};
@@ -227,14 +234,10 @@ pub fn renderError(tree: Ast, parse_error: Error, stream: anytype) !void {
});
},
.expected_semi_or_else => {
- return stream.print("expected ';' or 'else', found '{s}'", .{
- token_tags[parse_error.token].symbol(),
- });
+ return stream.writeAll("expected ';' or 'else' after statement");
},
.expected_semi_or_lbrace => {
- return stream.print("expected ';' or '{{', found '{s}'", .{
- token_tags[parse_error.token].symbol(),
- });
+ return stream.writeAll("expected ';' or block after function prototype");
},
.expected_statement => {
return stream.print("expected statement, found '{s}'", .{
@@ -323,6 +326,21 @@ pub fn renderError(tree: Ast, parse_error: Error, stream: anytype) !void {
.expected_semi_after_stmt => {
return stream.writeAll("expected ';' after statement");
},
+ .expected_comma_after_field => {
+ return stream.writeAll("expected ',' after field");
+ },
+ .expected_comma_after_arg => {
+ return stream.writeAll("expected ',' after argument");
+ },
+ .expected_comma_after_param => {
+ return stream.writeAll("expected ',' after parameter");
+ },
+ .expected_comma_after_initializer => {
+ return stream.writeAll("expected ',' after initializer");
+ },
+ .expected_comma_after_switch_prong => {
+ return stream.writeAll("expected ',' after switch prong");
+ },
.expected_token => {
const found_tag = token_tags[parse_error.token];
@@ -2516,6 +2534,11 @@ pub const Error = struct {
// these have `token` set to token after which a semicolon was expected
expected_semi_after_decl,
expected_semi_after_stmt,
+ expected_comma_after_field,
+ expected_comma_after_arg,
+ expected_comma_after_param,
+ expected_comma_after_initializer,
+ expected_comma_after_switch_prong,
/// `expected_tag` is populated.
expected_token,
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index e818046ac5..3b9679f62d 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -160,6 +160,12 @@ const Parser = struct {
.extra = .{ .expected_tag = expected_token },
});
}
+
+ fn warnExpectedAfter(p: *Parser, error_tag: AstError.Tag) error{OutOfMemory}!void {
+ @setCold(true);
+ try p.warnMsg(.{ .tag = error_tag, .token = p.tok_i - 1 });
+ }
+
fn warnMsg(p: *Parser, msg: Ast.Error) error{OutOfMemory}!void {
@setCold(true);
try p.errors.append(p.gpa, msg);
@@ -258,7 +264,7 @@ const Parser = struct {
}
// There is not allowed to be a decl after a field with no comma.
// Report error but recover parser.
- try p.warnExpected(.comma);
+ try p.warnExpectedAfter(.expected_comma_after_field);
p.findNextContainerMember();
}
},
@@ -361,7 +367,7 @@ const Parser = struct {
}
// There is not allowed to be a decl after a field with no comma.
// Report error but recover parser.
- try p.warnExpected(.comma);
+ try p.warnExpectedAfter(.expected_comma_after_field);
p.findNextContainerMember();
}
},
@@ -573,7 +579,7 @@ const Parser = struct {
// Since parseBlock only return error.ParseError on
// a missing '}' we can assume this function was
// supposed to end here.
- try p.warn(.expected_semi_or_lbrace);
+ try p.warnExpectedAfter(.expected_semi_or_lbrace);
return null_node;
},
}
@@ -984,7 +990,7 @@ const Parser = struct {
};
_ = p.eatToken(.keyword_else) orelse {
if (else_required) {
- try p.warn(.expected_semi_or_else);
+ try p.warnExpectedAfter(.expected_semi_or_else);
}
return p.addNode(.{
.tag = .if_simple,
@@ -1079,7 +1085,7 @@ const Parser = struct {
};
_ = p.eatToken(.keyword_else) orelse {
if (else_required) {
- try p.warn(.expected_semi_or_else);
+ try p.warnExpectedAfter(.expected_semi_or_else);
}
return p.addNode(.{
.tag = .for_simple,
@@ -1154,7 +1160,7 @@ const Parser = struct {
};
_ = p.eatToken(.keyword_else) orelse {
if (else_required) {
- try p.warn(.expected_semi_or_else);
+ try p.warnExpectedAfter(.expected_semi_or_else);
}
if (cont_expr == 0) {
return p.addNode(.{
@@ -2038,7 +2044,7 @@ const Parser = struct {
},
.colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_initializer),
}
if (p.eatToken(.r_brace)) |_| break;
const next = try p.expectFieldInit();
@@ -2079,7 +2085,7 @@ const Parser = struct {
},
.colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_initializer),
}
}
const comma = (p.token_tags[p.tok_i - 2] == .comma);
@@ -2158,7 +2164,7 @@ const Parser = struct {
},
.colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_arg),
}
}
const comma = (p.token_tags[p.tok_i - 2] == .comma);
@@ -2214,7 +2220,7 @@ const Parser = struct {
},
.colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_arg),
}
}
const comma = (p.token_tags[p.tok_i - 2] == .comma);
@@ -2455,7 +2461,7 @@ const Parser = struct {
},
.colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_initializer),
}
if (p.eatToken(.r_brace)) |_| break;
const next = try p.expectFieldInit();
@@ -2507,7 +2513,7 @@ const Parser = struct {
},
.colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_initializer),
}
}
const comma = (p.token_tags[p.tok_i - 2] == .comma);
@@ -2568,7 +2574,7 @@ const Parser = struct {
},
.colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_field),
}
}
return p.addNode(.{
@@ -3383,7 +3389,24 @@ const Parser = struct {
/// SwitchProngList <- (SwitchProng COMMA)* SwitchProng?
fn parseSwitchProngList(p: *Parser) !Node.SubRange {
- return ListParseFn(parseSwitchProng)(p);
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+ while (true) {
+ const item = try parseSwitchProng(p);
+ if (item == 0) break;
+
+ try p.scratch.append(p.gpa, item);
+
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ // All possible delimiters.
+ .colon, .r_paren, .r_brace, .r_bracket => break,
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warnExpectedAfter(.expected_comma_after_switch_prong),
+ }
+ }
+ return p.listToSpan(p.scratch.items[scratch_top..]);
}
/// ParamDeclList <- (ParamDecl COMMA)* ParamDecl?
@@ -3409,7 +3432,7 @@ const Parser = struct {
},
.colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_param),
}
}
if (varargs == .nonfinal) {
@@ -3423,33 +3446,6 @@ const Parser = struct {
};
}
- const NodeParseFn = fn (p: *Parser) Error!Node.Index;
-
- fn ListParseFn(comptime nodeParseFn: anytype) (fn (p: *Parser) Error!Node.SubRange) {
- return struct {
- pub fn parse(p: *Parser) Error!Node.SubRange {
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- while (true) {
- const item = try nodeParseFn(p);
- if (item == 0) break;
-
- try p.scratch.append(p.gpa, item);
-
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- // All possible delimiters.
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
- }
- }
- return p.listToSpan(p.scratch.items[scratch_top..]);
- }
- }.parse;
- }
-
/// FnCallArguments <- LPAREN ExprList RPAREN
/// ExprList <- (Expr COMMA)* Expr?
fn parseBuiltinCall(p: *Parser) !Node.Index {
@@ -3480,7 +3476,7 @@ const Parser = struct {
break;
},
// Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
+ else => try p.warnExpectedAfter(.expected_comma_after_arg),
}
}
const comma = (p.token_tags[p.tok_i - 2] == .comma);
@@ -3576,7 +3572,7 @@ const Parser = struct {
}
/// KEYWORD_if LPAREN Expr RPAREN PtrPayload? Body (KEYWORD_else Payload? Body)?
- fn parseIf(p: *Parser, bodyParseFn: NodeParseFn) !Node.Index {
+ fn parseIf(p: *Parser, bodyParseFn: fn (p: *Parser) Error!Node.Index) !Node.Index {
const if_token = p.eatToken(.keyword_if) orelse return null_node;
_ = try p.expectToken(.l_paren);
const condition = try p.expectExpr();
@@ -3664,12 +3660,12 @@ const Parser = struct {
}
}
- fn expectSemicolon(p: *Parser, tag: AstError.Tag, recoverable: bool) Error!void {
+ fn expectSemicolon(p: *Parser, error_tag: AstError.Tag, recoverable: bool) Error!void {
if (p.token_tags[p.tok_i] == .semicolon) {
_ = p.nextToken();
return;
}
- try p.warnMsg(.{ .tag = tag, .token = p.tok_i - 1 });
+ try p.warnExpectedAfter(error_tag);
if (!recoverable) return error.ParseError;
}
From 8a432436aedd8b10bb837965efe630ce58f89a0b Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sun, 13 Feb 2022 13:39:03 +0200
Subject: [PATCH 0157/2031] update compile error tests
---
test/compile_errors.zig | 44 ++++++++++++++++++++---------------------
test/stage2/cbe.zig | 2 +-
2 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 3c224013c9..79c17b4336 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2171,7 +2171,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = x;
\\}
, &[_][]const u8{
- "tmp.zig:3:7: error: expected ',', found 'align'",
+ "tmp.zig:3:6: error: expected ',' after field",
});
ctx.objErrStage1("bad alignment type",
@@ -4704,7 +4704,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:9: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - block expr",
@@ -4715,7 +4715,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:11: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - comptime statement",
@@ -4726,7 +4726,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:18: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - comptime expression",
@@ -4737,7 +4737,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:20: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - defer",
@@ -4748,7 +4748,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:15: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - if statement",
@@ -4759,7 +4759,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';' or 'else', found 'var'",
+ "tmp.zig:4:18: error: expected ';' or 'else' after statement",
});
ctx.objErrStage1("implicit semicolon - if expression",
@@ -4770,7 +4770,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:20: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - if-else statement",
@@ -4781,7 +4781,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:28: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - if-else expression",
@@ -4792,7 +4792,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:28: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - if-else-if statement",
@@ -4803,7 +4803,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';' or 'else', found 'var'",
+ "tmp.zig:4:37: error: expected ';' or 'else' after statement",
});
ctx.objErrStage1("implicit semicolon - if-else-if expression",
@@ -4814,7 +4814,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:37: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - if-else-if-else statement",
@@ -4825,7 +4825,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:47: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - if-else-if-else expression",
@@ -4836,7 +4836,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:45: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - test statement",
@@ -4847,7 +4847,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';' or 'else', found 'var'",
+ "tmp.zig:4:24: error: expected ';' or 'else' after statement",
});
ctx.objErrStage1("implicit semicolon - test expression",
@@ -4858,7 +4858,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:26: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - while statement",
@@ -4869,7 +4869,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';' or 'else', found 'var'",
+ "tmp.zig:4:21: error: expected ';' or 'else' after statement",
});
ctx.objErrStage1("implicit semicolon - while expression",
@@ -4880,7 +4880,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:23: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - while-continue statement",
@@ -4891,7 +4891,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';' or 'else', found 'var'",
+ "tmp.zig:4:26: error: expected ';' or 'else' after statement",
});
ctx.objErrStage1("implicit semicolon - while-continue expression",
@@ -4902,7 +4902,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:28: error: expected ';' after statement",
});
ctx.objErrStage1("implicit semicolon - for statement",
@@ -4913,7 +4913,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';' or 'else', found 'var'",
+ "tmp.zig:4:24: error: expected ';' or 'else' after statement",
});
ctx.objErrStage1("implicit semicolon - for expression",
@@ -4924,7 +4924,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ var bad = {};
\\}
, &[_][]const u8{
- "tmp.zig:5:5: error: expected ';', found 'var'",
+ "tmp.zig:4:26: error: expected ';' after statement",
});
ctx.objErrStage1("multiple function definitions",
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index cfb9831e40..949a7eb6b7 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -693,7 +693,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = E1.a;
\\}
, &.{
- ":3:7: error: expected ',', found 'align'",
+ ":3:6: error: expected ',' after field",
});
// Redundant non-exhaustive enum mark.
From 0699b29ce0c5d264bf39c5f88fa6025b61ca6303 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sun, 13 Feb 2022 14:21:53 +0200
Subject: [PATCH 0158/2031] parser: give better errors for misplaced `.{`
---
lib/std/zig/Ast.zig | 2 +-
lib/std/zig/parse.zig | 4 ++++
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index c12c230c69..65fea1ae2e 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -66,7 +66,7 @@ pub fn renderToArrayList(tree: Ast, buffer: *std.ArrayList(u8)) RenderError!void
/// Returns an extra offset for column and byte offset of errors that
/// should point after the token in the error message.
-pub fn errorOffset(tree:Ast, error_tag: Error.Tag, token: TokenIndex) u32 {
+pub fn errorOffset(tree: Ast, error_tag: Error.Tag, token: TokenIndex) u32 {
return switch (error_tag) {
.expected_semi_after_decl,
.expected_semi_after_stmt,
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 3b9679f62d..9c6f873748 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -3229,6 +3229,10 @@ const Parser = struct {
.rhs = p.nextToken(),
},
}),
+ .l_brace => {
+ // this is a misplaced `.{`, handle the error somewhere else
+ return null_node;
+ },
else => {
p.tok_i += 1;
try p.warn(.expected_suffix_op);
From 17822e4a050dcbcf7fe324fcdff68887fee73ff1 Mon Sep 17 00:00:00 2001
From: sharpobject
Date: Thu, 10 Feb 2022 05:57:10 +0900
Subject: [PATCH 0159/2031] std.json: fix compile error for comptime fields
This is covered by an existing test which was already failing.
---
lib/std/json.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/json.zig b/lib/std/json.zig
index ec3544364f..e9fde26bec 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1766,7 +1766,7 @@ fn parseInternal(
}
}
if (field.is_comptime) {
- if (!try parsesTo(field.field_type, field.default_value.?, tokens, child_options)) {
+ if (!try parsesTo(field.field_type, @ptrCast(*const field.field_type, field.default_value.?).*, tokens, child_options)) {
return error.UnexpectedValue;
}
} else {
From 8937f18a6f8496e011b13cb086b7948b5f1d540e Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sun, 13 Feb 2022 14:35:49 +0200
Subject: [PATCH 0160/2031] std: force refAllDecls to actually resolve all
decls
Only about half of the tests in std were actually being run (918 vs 2144).
---
lib/std/testing.zig | 2 ++
lib/std/zig/c_translation.zig | 2 +-
lib/std/zig/parser_test.zig | 20 +++++++++++---------
3 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index a9874d4df1..1134717c02 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -466,6 +466,8 @@ test {
pub fn refAllDecls(comptime T: type) void {
if (!builtin.is_test) return;
inline for (comptime std.meta.declarations(T)) |decl| {
+ if (decl.is_pub and @typeInfo(@TypeOf(@field(T, decl.name))) == .Struct)
+ _ = @hasDecl(@field(T, decl.name), "foo");
_ = decl;
}
}
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index 0062b071c2..67eceda937 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -166,7 +166,7 @@ pub fn sizeof(target: anytype) usize {
const array_info = @typeInfo(ptr.child).Array;
if ((array_info.child == u8 or array_info.child == u16) and
array_info.sentinel != null and
- array_info.sentinel.? == 0)
+ @ptrCast(*const array_info.child, array_info.sentinel.?).* == 0)
{
// length of the string plus one for the null terminator.
return (array_info.len + 1) * @sizeOf(array_info.child);
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 14b0e8b501..a0cc11ce4b 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -585,15 +585,6 @@ test "zig fmt: asm expression with comptime content" {
);
}
-test "zig fmt: anytype struct field" {
- try testCanonical(
- \\pub const Pointer = struct {
- \\ sentinel: anytype,
- \\};
- \\
- );
-}
-
test "zig fmt: array types last token" {
try testCanonical(
\\test {
@@ -4148,6 +4139,17 @@ test "zig fmt: container doc comments" {
);
}
+test "zig fmt: anytype struct field" {
+ try testError(
+ \\pub const Pointer = struct {
+ \\ sentinel: anytype,
+ \\};
+ \\
+ , &[_]Error{
+ .expected_type_expr,
+ });
+}
+
test "zig fmt: extern without container keyword returns error" {
try testError(
\\const container = extern {};
From 3bbe6a28e069b03c8a9185dd14129517453e26d2 Mon Sep 17 00:00:00 2001
From: Jacob G-W
Date: Thu, 27 Jan 2022 15:23:28 -0500
Subject: [PATCH 0161/2031] stage2: add decltests
---
lib/std/zig/Ast.zig | 2 +-
lib/std/zig/parse.zig | 10 ++++-
lib/std/zig/render.zig | 3 +-
src/AstGen.zig | 91 +++++++++++++++++++++++++++++++++-----
src/Module.zig | 6 +++
src/Zir.zig | 3 +-
src/print_zir.zig | 10 +++--
test/behavior.zig | 5 +++
test/behavior/decltest.zig | 7 +++
9 files changed, 119 insertions(+), 18 deletions(-)
create mode 100644 test/behavior/decltest.zig
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 17da4f5315..a756834c58 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -2519,7 +2519,7 @@ pub const Node = struct {
root,
/// `usingnamespace lhs;`. rhs unused. main_token is `usingnamespace`.
@"usingnamespace",
- /// lhs is test name token (must be string literal), if any.
+ /// lhs is test name token (must be string literal or identifier), if any.
/// rhs is the body node.
test_decl,
/// lhs is the index into extra_data.
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index a70d0309e3..cf27e6b1c7 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -500,10 +500,16 @@ const Parser = struct {
}
}
- /// TestDecl <- KEYWORD_test STRINGLITERALSINGLE? Block
+ /// TestDecl <- KEYWORD_test (STRINGLITERALSINGLE / IDENTIFIER)? Block
fn expectTestDecl(p: *Parser) !Node.Index {
const test_token = p.assertToken(.keyword_test);
- const name_token = p.eatToken(.string_literal);
+ const name_token = switch (p.token_tags[p.nextToken()]) {
+ .string_literal, .identifier => p.tok_i - 1,
+ else => blk: {
+ p.tok_i -= 1;
+ break :blk null;
+ },
+ };
const block_node = try p.parseBlock();
if (block_node == 0) return p.fail(.expected_block);
return p.addNode(.{
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index f17ee1e097..0f6fcac8b7 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -151,7 +151,8 @@ fn renderMember(gpa: Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, spac
.test_decl => {
const test_token = main_tokens[decl];
try renderToken(ais, tree, test_token, .space);
- if (token_tags[test_token + 1] == .string_literal) {
+ const test_name_tag = token_tags[test_token + 1];
+ if (test_name_tag == .string_literal or test_name_tag == .identifier) {
try renderToken(ais, tree, test_token + 1, .space);
}
try renderExpression(gpa, ais, tree, datas[decl].rhs, space);
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 9bc10f25e8..0652a6232f 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -105,8 +105,8 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
};
defer astgen.deinit(gpa);
- // String table indexes 0 and 1 are reserved for special meaning.
- try astgen.string_bytes.appendSlice(gpa, &[_]u8{ 0, 0 });
+ // String table indexes 0, 1, 2 are reserved for special meaning.
+ try astgen.string_bytes.appendSlice(gpa, &[_]u8{ 0, 0, 0 });
// We expect at least as many ZIR instructions and extra data items
// as AST nodes.
@@ -3736,13 +3736,78 @@ fn testDecl(
};
defer decl_block.unstack();
+ const main_tokens = tree.nodes.items(.main_token);
+ const token_tags = tree.tokens.items(.tag);
+ const test_token = main_tokens[node];
+ const test_name_token = test_token + 1;
+ const test_name_token_tag = token_tags[test_name_token];
+ const is_decltest = test_name_token_tag == .identifier;
const test_name: u32 = blk: {
- const main_tokens = tree.nodes.items(.main_token);
- const token_tags = tree.tokens.items(.tag);
- const test_token = main_tokens[node];
- const str_lit_token = test_token + 1;
- if (token_tags[str_lit_token] == .string_literal) {
- break :blk try astgen.testNameString(str_lit_token);
+ if (test_name_token_tag == .string_literal) {
+ break :blk try astgen.testNameString(test_name_token);
+ } else if (test_name_token_tag == .identifier) {
+ const ident_name_raw = tree.tokenSlice(test_name_token);
+
+ if (mem.eql(u8, ident_name_raw, "_")) return astgen.failTok(test_name_token, "'_' used as an identifier without @\"_\" syntax", .{});
+
+ // if not @"" syntax, just use raw token slice
+ if (ident_name_raw[0] != '@') {
+ if (primitives.get(ident_name_raw)) |_| return astgen.failTok(test_name_token, "cannot test a primitive", .{});
+
+ if (ident_name_raw.len >= 2) integer: {
+ const first_c = ident_name_raw[0];
+ if (first_c == 'i' or first_c == 'u') {
+ _ = switch (first_c == 'i') {
+ true => .signed,
+ false => .unsigned,
+ };
+ _ = parseBitCount(ident_name_raw[1..]) catch |err| switch (err) {
+ error.Overflow => return astgen.failTok(
+ test_name_token,
+ "primitive integer type '{s}' exceeds maximum bit width of 65535",
+ .{ident_name_raw},
+ ),
+ error.InvalidCharacter => break :integer,
+ };
+ return astgen.failTok(test_name_token, "cannot test a primitive", .{});
+ }
+ }
+ }
+
+ // Local variables, including function parameters.
+ const name_str_index = try astgen.identAsString(test_name_token);
+ var s = scope;
+ var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
+ var num_namespaces_out: u32 = 0;
+ var capturing_namespace: ?*Scope.Namespace = null;
+ while (true) switch (s.tag) {
+ .local_val, .local_ptr => unreachable, // a test cannot be in a local scope
+ .gen_zir => s = s.cast(GenZir).?.parent,
+ .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
+ .namespace => {
+ const ns = s.cast(Scope.Namespace).?;
+ if (ns.decls.get(name_str_index)) |i| {
+ if (found_already) |f| {
+ return astgen.failTokNotes(test_name_token, "ambiguous reference", .{}, &.{
+ try astgen.errNoteNode(f, "declared here", .{}),
+ try astgen.errNoteNode(i, "also declared here", .{}),
+ });
+ }
+ // We found a match but must continue looking for ambiguous references to decls.
+ found_already = i;
+ }
+ num_namespaces_out += 1;
+ capturing_namespace = ns;
+ s = ns.parent;
+ },
+ .top => break,
+ };
+ if (found_already == null) {
+ const ident_name = try astgen.identifierTokenString(test_name_token);
+ return astgen.failTok(test_name_token, "use of undeclared identifier '{s}'", .{ident_name});
+ }
+
+ break :blk name_str_index;
}
// String table index 1 has a special meaning here of test decl with no name.
break :blk 1;
@@ -3804,9 +3869,15 @@ fn testDecl(
const line_delta = decl_block.decl_line - gz.decl_line;
wip_members.appendToDecl(line_delta);
}
- wip_members.appendToDecl(test_name);
+ if (is_decltest)
+ wip_members.appendToDecl(2) // 2 here means that it is a decltest, look at doc comment for name
+ else
+ wip_members.appendToDecl(test_name);
wip_members.appendToDecl(block_inst);
- wip_members.appendToDecl(0); // no doc comments on test decls
+ if (is_decltest)
+ wip_members.appendToDecl(test_name) // the doc comment on a decltest represents its name
+ else
+ wip_members.appendToDecl(0); // no doc comments on test decls
}
fn structDeclInner(
diff --git a/src/Module.zig b/src/Module.zig
index 3631e41f25..b9e50355fd 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -4170,6 +4170,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
const line_off = zir.extra[decl_sub_index + 4];
const line = iter.parent_decl.relativeToLine(line_off);
const decl_name_index = zir.extra[decl_sub_index + 5];
+ const decl_doccomment_index = zir.extra[decl_sub_index + 7];
const decl_index = zir.extra[decl_sub_index + 6];
const decl_block_inst_data = zir.instructions.items(.data)[decl_index].pl_node;
const decl_node = iter.parent_decl.relativeToNodeIndex(decl_block_inst_data.src_node);
@@ -4193,6 +4194,11 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
iter.unnamed_test_index += 1;
break :name try std.fmt.allocPrintZ(gpa, "test_{d}", .{i});
},
+ 2 => name: {
+ is_named_test = true;
+ const test_name = zir.nullTerminatedString(decl_doccomment_index);
+ break :name try std.fmt.allocPrintZ(gpa, "decltest.{s}", .{test_name});
+ },
else => name: {
const raw_name = zir.nullTerminatedString(decl_name_index);
if (raw_name.len == 0) {
diff --git a/src/Zir.zig b/src/Zir.zig
index b7e3e60916..b8ff7ae50f 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -2579,10 +2579,11 @@ pub const Inst = struct {
/// - 0 means comptime or usingnamespace decl.
/// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace
/// - 1 means test decl with no name.
+ /// - 2 means that the test is a decltest, doc_comment gives the name of the identifier
/// - if there is a 0 byte at the position `name` indexes, it indicates
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
- /// doc_comment: u32, // 0 if no doc comment
+ /// doc_comment: u32, 0 if no doc comment, if this is a decltest, doc_comment references the decl name in the string table
/// align: Ref, // if corresponding bit is set
/// link_section_or_address_space: { // if corresponding bit is set.
/// link_section: Ref,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 9c79ad1a37..6396f11467 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -1443,20 +1443,24 @@ const Writer = struct {
} else if (decl_name_index == 1) {
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("test");
+ } else if (decl_name_index == 2) {
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("[{d}] decltest {s}", .{ sub_index, self.code.nullTerminatedString(doc_comment_index) });
} else {
const raw_decl_name = self.code.nullTerminatedString(decl_name_index);
const decl_name = if (raw_decl_name.len == 0)
self.code.nullTerminatedString(decl_name_index + 1)
else
raw_decl_name;
- const test_str = if (raw_decl_name.len == 0) "test " else "";
+ const test_str = if (raw_decl_name.len == 0) "test \"" else "";
const export_str = if (is_exported) "export " else "";
try self.writeDocComment(stream, doc_comment_index);
try stream.writeByteNTimes(' ', self.indent);
- try stream.print("[{d}] {s}{s}{s}{}", .{
- sub_index, pub_str, test_str, export_str, std.zig.fmtId(decl_name),
+ const endquote_if_test: []const u8 = if (raw_decl_name.len == 0) "\"" else "";
+ try stream.print("[{d}] {s}{s}{s}{}{s}", .{
+ sub_index, pub_str, test_str, export_str, std.zig.fmtId(decl_name), endquote_if_test,
});
if (align_inst != .none) {
try stream.writeAll(" align(");
diff --git a/test/behavior.zig b/test/behavior.zig
index 6b08465429..db6863a8b0 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -49,6 +49,11 @@ test {
_ = @import("behavior/type.zig");
_ = @import("behavior/var_args.zig");
+ // tests that don't pass for stage1
+ if (builtin.zig_backend != .stage1) {
+ _ = @import("behavior/decltest.zig");
+ }
+
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
_ = @import("behavior/bitcast.zig");
diff --git a/test/behavior/decltest.zig b/test/behavior/decltest.zig
new file mode 100644
index 0000000000..f731f80fb2
--- /dev/null
+++ b/test/behavior/decltest.zig
@@ -0,0 +1,7 @@
+pub fn the_add_function(a: u32, b: u32) u32 {
+ return a + b;
+}
+
+test the_add_function {
+ if (the_add_function(1, 2) != 3) unreachable;
+}
From f516e2c5b1d0e073d9ab4cd417aeb21f6cdf4a99 Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Fri, 11 Feb 2022 09:24:08 -0700
Subject: [PATCH 0162/2031] Simplify implementation of floorPowerOfTwo in
std.math
---
lib/std/math.zig | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 4b8bcf2287..71cb5a184c 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -1045,14 +1045,9 @@ pub fn isPowerOfTwo(v: anytype) bool {
/// Returns the nearest power of two less than or equal to value, or
/// zero if value is less than or equal to zero.
pub fn floorPowerOfTwo(comptime T: type, value: T) T {
- var x = value;
-
- comptime var i = 1;
- inline while (@typeInfo(T).Int.bits > i) : (i *= 2) {
- x |= (x >> i);
- }
-
- return x - (x >> 1);
+ const uT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
+ if (value <= 0) return 0;
+ return @as(T, 1) << log2_int(uT, @intCast(uT, value));
}
test "math.floorPowerOfTwo" {
@@ -1064,9 +1059,15 @@ fn testFloorPowerOfTwo() !void {
try testing.expect(floorPowerOfTwo(u32, 63) == 32);
try testing.expect(floorPowerOfTwo(u32, 64) == 64);
try testing.expect(floorPowerOfTwo(u32, 65) == 64);
+ try testing.expect(floorPowerOfTwo(u32, 0) == 0);
try testing.expect(floorPowerOfTwo(u4, 7) == 4);
try testing.expect(floorPowerOfTwo(u4, 8) == 8);
try testing.expect(floorPowerOfTwo(u4, 9) == 8);
+ try testing.expect(floorPowerOfTwo(u4, 0) == 0);
+ try testing.expect(floorPowerOfTwo(i4, 7) == 4);
+ try testing.expect(floorPowerOfTwo(i4, -8) == 0);
+ try testing.expect(floorPowerOfTwo(i4, -1) == 0);
+ try testing.expect(floorPowerOfTwo(i4, 0) == 0);
}
/// Returns the next power of two (if the value is not already a power of two).
From f22443bb05a6be6c3ade08254f52fdd05eeb2910 Mon Sep 17 00:00:00 2001
From: Sebsatian Keller
Date: Sun, 13 Feb 2022 14:19:33 +0100
Subject: [PATCH 0163/2031] Fixed progress indicator for `zig test` (#10859)
Previously the progress displayed the first item as [0/x]. This was
misleading when x is the number of items. The first item should be
displayed as [1/x]
---
lib/std/Progress.zig | 5 +++--
lib/std/special/test_runner.zig | 4 ++--
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index 24b66c1162..ecef04c600 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -261,6 +261,7 @@ fn refreshWithHeldLock(self: *Progress) void {
need_ellipse = false;
const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
+ const current_item = completed_items + 1;
if (node.name.len != 0 or eti > 0) {
if (node.name.len != 0) {
self.bufWrite(&end, "{s}", .{node.name});
@@ -268,11 +269,11 @@ fn refreshWithHeldLock(self: *Progress) void {
}
if (eti > 0) {
if (need_ellipse) self.bufWrite(&end, " ", .{});
- self.bufWrite(&end, "[{d}/{d}] ", .{ completed_items, eti });
+ self.bufWrite(&end, "[{d}/{d}] ", .{ current_item, eti });
need_ellipse = false;
} else if (completed_items != 0) {
if (need_ellipse) self.bufWrite(&end, " ", .{});
- self.bufWrite(&end, "[{d}] ", .{completed_items});
+ self.bufWrite(&end, "[{d}] ", .{current_item});
need_ellipse = false;
}
}
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index fb00a9dc30..201a5ccd90 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -82,18 +82,18 @@ pub fn main() void {
} else |err| switch (err) {
error.SkipZigTest => {
skip_count += 1;
- test_node.end();
progress.log("{s}... SKIP\n", .{test_fn.name});
if (!have_tty) std.debug.print("SKIP\n", .{});
+ test_node.end();
},
else => {
fail_count += 1;
- test_node.end();
progress.log("{s}... FAIL ({s})\n", .{ test_fn.name, @errorName(err) });
if (!have_tty) std.debug.print("FAIL ({s})\n", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
+ test_node.end();
},
}
}
From b5f8fb85e64022ed1ee59ff70753577839ad41b6 Mon Sep 17 00:00:00 2001
From: Mateusz Radomski <33978857+m-radomski@users.noreply.github.com>
Date: Sun, 13 Feb 2022 14:37:38 +0100
Subject: [PATCH 0164/2031] Implement f128 `@rem`
---
lib/std/special/compiler_rt.zig | 3 +
lib/std/special/compiler_rt/floatfmodl.zig | 126 ++++++++++++++++++
.../special/compiler_rt/floatfmodl_test.zig | 46 +++++++
src/stage1/ir.cpp | 32 ++++-
src/value.zig | 9 +-
test/behavior/math.zig | 60 ++++++++-
6 files changed, 263 insertions(+), 13 deletions(-)
create mode 100644 lib/std/special/compiler_rt/floatfmodl.zig
create mode 100644 lib/std/special/compiler_rt/floatfmodl_test.zig
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index 36f703464a..3ef2bf4747 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -759,6 +759,9 @@ comptime {
@export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
}
+ const fmodl = @import("compiler_rt/floatfmodl.zig").fmodl;
+ @export(fmodl, .{ .name = "fmodl", .linkage = linkage });
+
@export(floorf, .{ .name = "floorf", .linkage = linkage });
@export(floor, .{ .name = "floor", .linkage = linkage });
@export(floorl, .{ .name = "floorl", .linkage = linkage });
diff --git a/lib/std/special/compiler_rt/floatfmodl.zig b/lib/std/special/compiler_rt/floatfmodl.zig
new file mode 100644
index 0000000000..942a7c1125
--- /dev/null
+++ b/lib/std/special/compiler_rt/floatfmodl.zig
@@ -0,0 +1,126 @@
+const builtin = @import("builtin");
+const std = @import("std");
+
+// fmodl - floating modulo large, returns the remainder of division for f128 types
+// Logic and flow heavily inspired by MUSL fmodl for 113 mantissa digits
+pub fn fmodl(a: f128, b: f128) callconv(.C) f128 {
+ @setRuntimeSafety(builtin.is_test);
+ var amod = a;
+ var bmod = b;
+ const aPtr_u64 = @ptrCast([*]u64, &amod);
+ const bPtr_u64 = @ptrCast([*]u64, &bmod);
+ const aPtr_u16 = @ptrCast([*]u16, &amod);
+ const bPtr_u16 = @ptrCast([*]u16, &bmod);
+
+ const exp_and_sign_index = comptime switch (builtin.target.cpu.arch.endian()) {
+ .Little => 7,
+ .Big => 0,
+ };
+ const low_index = comptime switch (builtin.target.cpu.arch.endian()) {
+ .Little => 0,
+ .Big => 1,
+ };
+ const high_index = comptime switch (builtin.target.cpu.arch.endian()) {
+ .Little => 1,
+ .Big => 0,
+ };
+
+ const signA = aPtr_u16[exp_and_sign_index] & 0x8000;
+ var expA = @intCast(i32, (aPtr_u16[exp_and_sign_index] & 0x7fff));
+ var expB = bPtr_u16[exp_and_sign_index] & 0x7fff;
+
+ // There are 3 cases where the answer is undefined, check for:
+ // - fmodl(val, 0)
+ // - fmodl(val, NaN)
+ // - fmodl(inf, val)
+ // The sign on checked values does not matter.
+ // Doing (a * b) / (a * b) produces undefined results
+ // because the three cases always produce undefined calculations:
+ // - 0 / 0
+ // - val * NaN
+ // - inf / inf
+ if (b == 0 or std.math.isNan(b) or expA == 0x7fff) {
+ return (a * b) / (a * b);
+ }
+
+ // Remove the sign from both
+ aPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expA));
+ bPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expB));
+ if (amod <= bmod) {
+ if (amod == bmod) {
+ return 0 * a;
+ }
+ return a;
+ }
+
+ if (expA == 0) {
+ amod *= 0x1p120;
+ expA = aPtr_u16[exp_and_sign_index] -% 120;
+ }
+
+ if (expB == 0) {
+ bmod *= 0x1p120;
+ expB = bPtr_u16[exp_and_sign_index] -% 120;
+ }
+
+ // OR in extra non-stored mantissa digit
+ var highA: u64 = (aPtr_u64[high_index] & (std.math.maxInt(u64) >> 16)) | 1 << 48;
+ var highB: u64 = (bPtr_u64[high_index] & (std.math.maxInt(u64) >> 16)) | 1 << 48;
+ var lowA: u64 = aPtr_u64[low_index];
+ var lowB: u64 = bPtr_u64[low_index];
+
+ while (expA > expB) : (expA -= 1) {
+ var high = highA -% highB;
+ var low = lowA -% lowB;
+ if (lowA < lowB) {
+ high = highA -% 1;
+ }
+ if (high >> 63 == 0) {
+ if ((high | low) == 0) {
+ return 0 * a;
+ }
+ highA = 2 *% high + (low >> 63);
+ lowA = 2 *% low;
+ } else {
+ highA = 2 *% highA + (lowA >> 63);
+ lowA = 2 *% lowA;
+ }
+ }
+
+ var high = highA -% highB;
+ var low = lowA -% lowB;
+ if (lowA < lowB) {
+ high -= 1;
+ }
+ if (high >> 63 == 0) {
+ if ((high | low) == 0) {
+ return 0 * a;
+ }
+ highA = high;
+ lowA = low;
+ }
+
+ while (highA >> 48 == 0) {
+ highA = 2 *% highA + (lowA >> 63);
+ lowA = 2 *% lowA;
+ expA = expA - 1;
+ }
+
+ // Overwrite the current amod with the values in highA and lowA
+ aPtr_u64[high_index] = highA;
+ aPtr_u64[low_index] = lowA;
+
+ // Combine the exponent with the sign, normalize if it happened to be denormalized
+ if (expA <= 0) {
+ aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, (expA +% 120))) | signA;
+ amod *= 0x1p-120;
+ } else {
+ aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, expA)) | signA;
+ }
+
+ return amod;
+}
+
+test {
+ _ = @import("floatfmodl_test.zig");
+}
diff --git a/lib/std/special/compiler_rt/floatfmodl_test.zig b/lib/std/special/compiler_rt/floatfmodl_test.zig
new file mode 100644
index 0000000000..58636ef6f7
--- /dev/null
+++ b/lib/std/special/compiler_rt/floatfmodl_test.zig
@@ -0,0 +1,46 @@
+const std = @import("std");
+const fmodl = @import("floatfmodl.zig");
+const testing = std.testing;
+
+fn test_fmodl(a: f128, b: f128, exp: f128) !void {
+ const res = fmodl.fmodl(a, b);
+ try testing.expect(exp == res);
+}
+
+fn test_fmodl_nans() !void {
+ try testing.expect(std.math.isNan(fmodl.fmodl(1.0, std.math.nan_f128)));
+ try testing.expect(std.math.isNan(fmodl.fmodl(1.0, -std.math.nan_f128)));
+ try testing.expect(std.math.isNan(fmodl.fmodl(std.math.nan_f128, 1.0)));
+ try testing.expect(std.math.isNan(fmodl.fmodl(-std.math.nan_f128, 1.0)));
+}
+
+fn test_fmodl_infs() !void {
+ try testing.expect(fmodl.fmodl(1.0, std.math.inf_f128) == 1.0);
+ try testing.expect(fmodl.fmodl(1.0, -std.math.inf_f128) == 1.0);
+ try testing.expect(std.math.isNan(fmodl.fmodl(std.math.inf_f128, 1.0)));
+ try testing.expect(std.math.isNan(fmodl.fmodl(-std.math.inf_f128, 1.0)));
+}
+
+test "fmodl" {
+ try test_fmodl(6.8, 4.0, 2.8);
+ try test_fmodl(6.8, -4.0, 2.8);
+ try test_fmodl(-6.8, 4.0, -2.8);
+ try test_fmodl(-6.8, -4.0, -2.8);
+ try test_fmodl(3.0, 2.0, 1.0);
+ try test_fmodl(-5.0, 3.0, -2.0);
+ try test_fmodl(3.0, 2.0, 1.0);
+ try test_fmodl(1.0, 2.0, 1.0);
+ try test_fmodl(0.0, 1.0, 0.0);
+ try test_fmodl(-0.0, 1.0, -0.0);
+ try test_fmodl(7046119.0, 5558362.0, 1487757.0);
+ try test_fmodl(9010357.0, 1957236.0, 1181413.0);
+
+ // Denormals
+ const a: f128 = 0xedcb34a235253948765432134674p-16494;
+ const b: f128 = 0x5d2e38791cfbc0737402da5a9518p-16494;
+ const exp: f128 = 0x336ec3affb2db8618e4e7d5e1c44p-16494;
+ try test_fmodl(a, b, exp);
+
+ try test_fmodl_nans();
+ try test_fmodl_infs();
+}
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index 0b6332f480..63466849a4 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -3338,6 +3338,32 @@ static void float_div_floor(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
}
}
+// c = a - b * trunc(a / b)
+static float16_t zig_f16_rem(float16_t a, float16_t b) {
+ float16_t c;
+ c = f16_div(a, b);
+ c = f16_roundToInt(c, softfloat_round_minMag, false);
+ c = f16_mul(b, c);
+ c = f16_sub(a, c);
+ return c;
+}
+
+// c = a - b * trunc(a / b)
+static void zig_f128M_rem(const float128_t* a, const float128_t* b, float128_t* c) {
+ f128M_div(a, b, c);
+ f128M_roundToInt(c, softfloat_round_minMag, false, c);
+ f128M_mul(b, c, c);
+ f128M_sub(a, c, c);
+}
+
+// c = a - b * trunc(a / b)
+static void zig_extF80M_rem(const extFloat80_t* a, const extFloat80_t* b, extFloat80_t* c) {
+ extF80M_div(a, b, c);
+ extF80M_roundToInt(c, softfloat_round_minMag, false, c);
+ extF80M_mul(b, c, c);
+ extF80M_sub(a, c, c);
+}
+
static void float_rem(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
@@ -3346,7 +3372,7 @@ static void float_rem(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
} else if (op1->type->id == ZigTypeIdFloat) {
switch (op1->type->data.floating.bit_count) {
case 16:
- out_val->data.x_f16 = f16_rem(op1->data.x_f16, op2->data.x_f16);
+ out_val->data.x_f16 = zig_f16_rem(op1->data.x_f16, op2->data.x_f16);
return;
case 32:
out_val->data.x_f32 = fmodf(op1->data.x_f32, op2->data.x_f32);
@@ -3355,10 +3381,10 @@ static void float_rem(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
out_val->data.x_f64 = fmod(op1->data.x_f64, op2->data.x_f64);
return;
case 80:
- extF80M_rem(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
+ zig_extF80M_rem(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80);
return;
case 128:
- f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
+ zig_f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
default:
zig_unreachable();
diff --git a/src/value.zig b/src/value.zig
index 1f93a828aa..89c57ad53d 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1482,8 +1482,7 @@ pub const Value = extern union {
.float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
//.float_80 => @rem(self.castTag(.float_80).?.data, 1) != 0,
.float_80 => @panic("TODO implement __remx in compiler-rt"),
- //.float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
- .float_128 => @panic("TODO implement fmodl in compiler-rt"),
+ .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
else => unreachable,
};
@@ -2888,9 +2887,6 @@ pub const Value = extern union {
return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val));
},
128 => {
- if (true) {
- @panic("TODO implement compiler_rt fmodl");
- }
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val));
@@ -2925,9 +2921,6 @@ pub const Value = extern union {
return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val));
},
128 => {
- if (true) {
- @panic("TODO implement compiler_rt fmodl");
- }
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val));
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index a9000353b8..fe5329ec81 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -782,8 +782,6 @@ test "comptime float rem int" {
}
test "remainder division" {
- if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
comptime try remdiv(f16);
comptime try remdiv(f32);
comptime try remdiv(f64);
@@ -798,6 +796,64 @@ fn remdiv(comptime T: type) !void {
try expect(@as(T, 1) == @as(T, 7) % @as(T, 3));
}
+test "float remainder division using @rem" {
+ comptime try frem(f16);
+ comptime try frem(f32);
+ comptime try frem(f64);
+ comptime try frem(f128);
+ try frem(f16);
+ try frem(f32);
+ try frem(f64);
+ try frem(f128);
+}
+
+fn frem(comptime T: type) !void {
+ const epsilon = switch (T) {
+ f16 => 1.0,
+ f32 => 0.001,
+ f64 => 0.00001,
+ f128 => 0.0000001,
+ else => unreachable,
+ };
+
+ try expect(std.math.fabs(@rem(@as(T, 6.9), @as(T, 4.0)) - @as(T, 2.9)) < epsilon);
+ try expect(std.math.fabs(@rem(@as(T, -6.9), @as(T, 4.0)) - @as(T, -2.9)) < epsilon);
+ try expect(std.math.fabs(@rem(@as(T, -5.0), @as(T, 3.0)) - @as(T, -2.0)) < epsilon);
+ try expect(std.math.fabs(@rem(@as(T, 3.0), @as(T, 2.0)) - @as(T, 1.0)) < epsilon);
+ try expect(std.math.fabs(@rem(@as(T, 1.0), @as(T, 2.0)) - @as(T, 1.0)) < epsilon);
+ try expect(std.math.fabs(@rem(@as(T, 0.0), @as(T, 1.0)) - @as(T, 0.0)) < epsilon);
+ try expect(std.math.fabs(@rem(@as(T, -0.0), @as(T, 1.0)) - @as(T, -0.0)) < epsilon);
+}
+
+test "float modulo division using @mod" {
+ comptime try fmod(f16);
+ comptime try fmod(f32);
+ comptime try fmod(f64);
+ comptime try fmod(f128);
+ try fmod(f16);
+ try fmod(f32);
+ try fmod(f64);
+ try fmod(f128);
+}
+
+fn fmod(comptime T: type) !void {
+ const epsilon = switch (T) {
+ f16 => 1.0,
+ f32 => 0.001,
+ f64 => 0.00001,
+ f128 => 0.0000001,
+ else => unreachable,
+ };
+
+ try expect(std.math.fabs(@mod(@as(T, 6.9), @as(T, 4.0)) - @as(T, 2.9)) < epsilon);
+ try expect(std.math.fabs(@mod(@as(T, -6.9), @as(T, 4.0)) - @as(T, 1.1)) < epsilon);
+ try expect(std.math.fabs(@mod(@as(T, -5.0), @as(T, 3.0)) - @as(T, 1.0)) < epsilon);
+ try expect(std.math.fabs(@mod(@as(T, 3.0), @as(T, 2.0)) - @as(T, 1.0)) < epsilon);
+ try expect(std.math.fabs(@mod(@as(T, 1.0), @as(T, 2.0)) - @as(T, 1.0)) < epsilon);
+ try expect(std.math.fabs(@mod(@as(T, 0.0), @as(T, 1.0)) - @as(T, 0.0)) < epsilon);
+ try expect(std.math.fabs(@mod(@as(T, -0.0), @as(T, 1.0)) - @as(T, -0.0)) < epsilon);
+}
+
test "@sqrt" {
try testSqrt(f64, 12.0);
comptime try testSqrt(f64, 12.0);
From 55fa349ad946aaa4e2ff5a5dc2a3f0d6bce64282 Mon Sep 17 00:00:00 2001
From: Koakuma
Date: Sun, 13 Feb 2022 22:16:22 +0700
Subject: [PATCH 0165/2031] Import SPARCv9 libunwind
Import LLVM's D32450/D116857 patch to enable unwinding support on
SPARCv9 systems.
---
lib/libunwind/include/__libunwind_config.h | 7 +
lib/libunwind/src/DwarfInstructions.hpp | 22 ++-
lib/libunwind/src/DwarfParser.hpp | 25 ++-
lib/libunwind/src/Registers.hpp | 187 +++++++++++++++++++++
lib/libunwind/src/UnwindCursor.hpp | 16 ++
lib/libunwind/src/UnwindRegistersRestore.S | 47 ++++++
lib/libunwind/src/UnwindRegistersSave.S | 60 ++++++-
lib/libunwind/src/config.h | 11 +-
lib/libunwind/src/libunwind.cpp | 2 +
9 files changed, 367 insertions(+), 10 deletions(-)
diff --git a/lib/libunwind/include/__libunwind_config.h b/lib/libunwind/include/__libunwind_config.h
index a50ba05388..438d6a705d 100644
--- a/lib/libunwind/include/__libunwind_config.h
+++ b/lib/libunwind/include/__libunwind_config.h
@@ -23,6 +23,7 @@
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_OR1K 32
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MIPS 65
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_SPARC 31
+#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_SPARC64 31
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_HEXAGON 34
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_RISCV 64
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_VE 143
@@ -125,6 +126,11 @@
# error "Unsupported MIPS ABI and/or environment"
# endif
# define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_MIPS
+# elif defined(__sparc__) && defined(__arch64__)
+# define _LIBUNWIND_TARGET_SPARC64 1
+# define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_SPARC64
+# define _LIBUNWIND_CONTEXT_SIZE 33
+# define _LIBUNWIND_CURSOR_SIZE 45
# elif defined(__sparc__)
#define _LIBUNWIND_TARGET_SPARC 1
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_SPARC
@@ -165,6 +171,7 @@
# define _LIBUNWIND_TARGET_MIPS_O32 1
# define _LIBUNWIND_TARGET_MIPS_NEWABI 1
# define _LIBUNWIND_TARGET_SPARC 1
+# define _LIBUNWIND_TARGET_SPARC64 1
# define _LIBUNWIND_TARGET_HEXAGON 1
# define _LIBUNWIND_TARGET_RISCV 1
# define _LIBUNWIND_TARGET_VE 1
diff --git a/lib/libunwind/src/DwarfInstructions.hpp b/lib/libunwind/src/DwarfInstructions.hpp
index 686c6be0d8..60182e4fef 100644
--- a/lib/libunwind/src/DwarfInstructions.hpp
+++ b/lib/libunwind/src/DwarfInstructions.hpp
@@ -67,7 +67,7 @@ private:
return (pint_t)((sint_t)registers.getRegister((int)prolog.cfaRegister) +
prolog.cfaRegisterOffset);
if (prolog.cfaExpression != 0)
- return evaluateExpression((pint_t)prolog.cfaExpression, addressSpace,
+ return evaluateExpression((pint_t)prolog.cfaExpression, addressSpace,
registers, 0);
assert(0 && "getCFA(): unknown location");
__builtin_unreachable();
@@ -75,6 +75,14 @@ private:
};
+template
+auto getSparcWCookie(const R &r, int) -> decltype(r.getWCookie()) {
+ return r.getWCookie();
+}
+template uint64_t getSparcWCookie(const R &, long) {
+ return 0;
+}
+
template
typename A::pint_t DwarfInstructions::getSavedRegister(
A &addressSpace, const R ®isters, pint_t cfa,
@@ -83,6 +91,10 @@ typename A::pint_t DwarfInstructions::getSavedRegister(
case CFI_Parser::kRegisterInCFA:
return (pint_t)addressSpace.getRegister(cfa + (pint_t)savedReg.value);
+ case CFI_Parser::kRegisterInCFADecrypt: // sparc64 specific
+ return addressSpace.getP(cfa + (pint_t)savedReg.value) ^
+ getSparcWCookie(registers, 0);
+
case CFI_Parser::kRegisterAtExpression:
return (pint_t)addressSpace.getRegister(evaluateExpression(
(pint_t)savedReg.value, addressSpace, registers, cfa));
@@ -121,6 +133,7 @@ double DwarfInstructions::getSavedFloatRegister(
case CFI_Parser::kRegisterUndefined:
case CFI_Parser::kRegisterOffsetFromCFA:
case CFI_Parser::kRegisterInRegister:
+ case CFI_Parser::kRegisterInCFADecrypt:
// FIX ME
break;
}
@@ -145,6 +158,7 @@ v128 DwarfInstructions::getSavedVectorRegister(
case CFI_Parser::kRegisterUndefined:
case CFI_Parser::kRegisterOffsetFromCFA:
case CFI_Parser::kRegisterInRegister:
+ case CFI_Parser::kRegisterInCFADecrypt:
// FIX ME
break;
}
@@ -249,6 +263,12 @@ int DwarfInstructions::stepWithDwarf(A &addressSpace, pint_t pc,
}
#endif
+#if defined(_LIBUNWIND_TARGET_SPARC64)
+ // Skip call site instruction and delay slot
+ if (R::getArch() == REGISTERS_SPARC64)
+ returnAddress += 8;
+#endif
+
#if defined(_LIBUNWIND_TARGET_PPC64)
#define PPC64_ELFV1_R2_LOAD_INST_ENCODING 0xe8410028u // ld r2,40(r1)
#define PPC64_ELFV1_R2_OFFSET 40
diff --git a/lib/libunwind/src/DwarfParser.hpp b/lib/libunwind/src/DwarfParser.hpp
index de0eb6de9d..f0aa4085d3 100644
--- a/lib/libunwind/src/DwarfParser.hpp
+++ b/lib/libunwind/src/DwarfParser.hpp
@@ -71,6 +71,7 @@ public:
kRegisterUnused,
kRegisterUndefined,
kRegisterInCFA,
+ kRegisterInCFADecrypt, // sparc64 specific
kRegisterOffsetFromCFA,
kRegisterInRegister,
kRegisterAtExpression,
@@ -723,7 +724,8 @@ bool CFI_Parser::parseFDEInstructions(A &addressSpace,
"DW_CFA_GNU_negative_offset_extended(%" PRId64 ")\n", offset);
break;
-#if defined(_LIBUNWIND_TARGET_AARCH64) || defined(_LIBUNWIND_TARGET_SPARC)
+#if defined(_LIBUNWIND_TARGET_AARCH64) || defined(_LIBUNWIND_TARGET_SPARC) || \
+ defined(_LIBUNWIND_TARGET_SPARC64)
// The same constant is used to represent different instructions on
// AArch64 (negate_ra_state) and SPARC (window_save).
static_assert(DW_CFA_AARCH64_negate_ra_state == DW_CFA_GNU_window_save,
@@ -757,8 +759,29 @@ bool CFI_Parser::parseFDEInstructions(A &addressSpace,
}
break;
#endif
+
+#if defined(_LIBUNWIND_TARGET_SPARC64)
+ // case DW_CFA_GNU_window_save:
+ case REGISTERS_SPARC64:
+ // Don't save %o0-%o7 on sparc64.
+ // https://reviews.llvm.org/D32450#736405
+
+ for (reg = UNW_SPARC_L0; reg <= UNW_SPARC_I7; reg++) {
+ if (reg == UNW_SPARC_I7)
+ results->setRegister(
+ reg, kRegisterInCFADecrypt,
+ ((int64_t)reg - UNW_SPARC_L0) * sizeof(pint_t), initialState);
+ else
+ results->setRegister(
+ reg, kRegisterInCFA,
+ ((int64_t)reg - UNW_SPARC_L0) * sizeof(pint_t), initialState);
+ }
+ _LIBUNWIND_TRACE_DWARF("DW_CFA_GNU_window_save\n");
+ break;
+#endif
}
break;
+
#else
(void)arch;
#endif
diff --git a/lib/libunwind/src/Registers.hpp b/lib/libunwind/src/Registers.hpp
index aea84cc227..e37021a7c7 100644
--- a/lib/libunwind/src/Registers.hpp
+++ b/lib/libunwind/src/Registers.hpp
@@ -34,6 +34,7 @@ enum {
REGISTERS_MIPS_O32,
REGISTERS_MIPS_NEWABI,
REGISTERS_SPARC,
+ REGISTERS_SPARC64,
REGISTERS_HEXAGON,
REGISTERS_RISCV,
REGISTERS_VE,
@@ -3546,6 +3547,192 @@ inline const char *Registers_sparc::getRegisterName(int regNum) {
}
#endif // _LIBUNWIND_TARGET_SPARC
+
+#if defined(_LIBUNWIND_TARGET_SPARC64)
+/// Registers_sparc64 holds the register state of a thread in a 64-bit
+/// sparc process.
+class _LIBUNWIND_HIDDEN Registers_sparc64 {
+public:
+ Registers_sparc64() = default;
+ Registers_sparc64(const void *registers);
+
+ bool validRegister(int num) const;
+ uint64_t getRegister(int num) const;
+ void setRegister(int num, uint64_t value);
+ bool validFloatRegister(int num) const;
+ double getFloatRegister(int num) const;
+ void setFloatRegister(int num, double value);
+ bool validVectorRegister(int num) const;
+ v128 getVectorRegister(int num) const;
+ void setVectorRegister(int num, v128 value);
+ const char *getRegisterName(int num);
+ void jumpto();
+ static int lastDwarfRegNum() {
+ return _LIBUNWIND_HIGHEST_DWARF_REGISTER_SPARC64;
+ }
+ static int getArch() { return REGISTERS_SPARC64; }
+
+ uint64_t getSP() const { return _registers.__regs[UNW_SPARC_O6] + 2047; }
+ void setSP(uint64_t value) { _registers.__regs[UNW_SPARC_O6] = value - 2047; }
+ uint64_t getIP() const { return _registers.__regs[UNW_SPARC_O7]; }
+ void setIP(uint64_t value) { _registers.__regs[UNW_SPARC_O7] = value; }
+ uint64_t getWCookie() const { return _wcookie; }
+
+private:
+ struct sparc64_thread_state_t {
+ uint64_t __regs[32];
+ };
+
+ sparc64_thread_state_t _registers{};
+ uint64_t _wcookie = 0;
+};
+
+inline Registers_sparc64::Registers_sparc64(const void *registers) {
+ static_assert((check_fit::does_fit),
+ "sparc64 registers do not fit into unw_context_t");
+ memcpy(&_registers, registers, sizeof(_registers));
+ memcpy(&_wcookie,
+ static_cast(registers) + sizeof(_registers),
+ sizeof(_wcookie));
+}
+
+inline bool Registers_sparc64::validRegister(int regNum) const {
+ if (regNum == UNW_REG_IP)
+ return true;
+ if (regNum == UNW_REG_SP)
+ return true;
+ if (regNum < 0)
+ return false;
+ if (regNum <= UNW_SPARC_I7)
+ return true;
+ return false;
+}
+
+inline uint64_t Registers_sparc64::getRegister(int regNum) const {
+ if (regNum >= UNW_SPARC_G0 && regNum <= UNW_SPARC_I7)
+ return _registers.__regs[regNum];
+
+ switch (regNum) {
+ case UNW_REG_IP:
+ return _registers.__regs[UNW_SPARC_O7];
+ case UNW_REG_SP:
+ return _registers.__regs[UNW_SPARC_O6] + 2047;
+ }
+ _LIBUNWIND_ABORT("unsupported sparc64 register");
+}
+
+inline void Registers_sparc64::setRegister(int regNum, uint64_t value) {
+ if (regNum >= UNW_SPARC_G0 && regNum <= UNW_SPARC_I7) {
+ _registers.__regs[regNum] = value;
+ return;
+ }
+
+ switch (regNum) {
+ case UNW_REG_IP:
+ _registers.__regs[UNW_SPARC_O7] = value;
+ return;
+ case UNW_REG_SP:
+ _registers.__regs[UNW_SPARC_O6] = value - 2047;
+ return;
+ }
+ _LIBUNWIND_ABORT("unsupported sparc64 register");
+}
+
+inline bool Registers_sparc64::validFloatRegister(int) const { return false; }
+
+inline double Registers_sparc64::getFloatRegister(int) const {
+ _LIBUNWIND_ABORT("no sparc64 float registers");
+}
+
+inline void Registers_sparc64::setFloatRegister(int, double) {
+ _LIBUNWIND_ABORT("no sparc64 float registers");
+}
+
+inline bool Registers_sparc64::validVectorRegister(int) const { return false; }
+
+inline v128 Registers_sparc64::getVectorRegister(int) const {
+ _LIBUNWIND_ABORT("no sparc64 vector registers");
+}
+
+inline void Registers_sparc64::setVectorRegister(int, v128) {
+ _LIBUNWIND_ABORT("no sparc64 vector registers");
+}
+
+inline const char *Registers_sparc64::getRegisterName(int regNum) {
+ switch (regNum) {
+ case UNW_REG_IP:
+ return "pc";
+ case UNW_SPARC_G0:
+ return "g0";
+ case UNW_SPARC_G1:
+ return "g1";
+ case UNW_SPARC_G2:
+ return "g2";
+ case UNW_SPARC_G3:
+ return "g3";
+ case UNW_SPARC_G4:
+ return "g4";
+ case UNW_SPARC_G5:
+ return "g5";
+ case UNW_SPARC_G6:
+ return "g6";
+ case UNW_SPARC_G7:
+ return "g7";
+ case UNW_SPARC_O0:
+ return "o0";
+ case UNW_SPARC_O1:
+ return "o1";
+ case UNW_SPARC_O2:
+ return "o2";
+ case UNW_SPARC_O3:
+ return "o3";
+ case UNW_SPARC_O4:
+ return "o4";
+ case UNW_SPARC_O5:
+ return "o5";
+ case UNW_REG_SP:
+ case UNW_SPARC_O6:
+ return "o6";
+ case UNW_SPARC_O7:
+ return "o7";
+ case UNW_SPARC_L0:
+ return "l0";
+ case UNW_SPARC_L1:
+ return "l1";
+ case UNW_SPARC_L2:
+ return "l2";
+ case UNW_SPARC_L3:
+ return "l3";
+ case UNW_SPARC_L4:
+ return "l4";
+ case UNW_SPARC_L5:
+ return "l5";
+ case UNW_SPARC_L6:
+ return "l6";
+ case UNW_SPARC_L7:
+ return "l7";
+ case UNW_SPARC_I0:
+ return "i0";
+ case UNW_SPARC_I1:
+ return "i1";
+ case UNW_SPARC_I2:
+ return "i2";
+ case UNW_SPARC_I3:
+ return "i3";
+ case UNW_SPARC_I4:
+ return "i4";
+ case UNW_SPARC_I5:
+ return "i5";
+ case UNW_SPARC_I6:
+ return "i6";
+ case UNW_SPARC_I7:
+ return "i7";
+ default:
+ return "unknown register";
+ }
+}
+#endif // _LIBUNWIND_TARGET_SPARC64
+
#if defined(_LIBUNWIND_TARGET_HEXAGON)
/// Registers_hexagon holds the register state of a thread in a Hexagon QDSP6
/// process.
diff --git a/lib/libunwind/src/UnwindCursor.hpp b/lib/libunwind/src/UnwindCursor.hpp
index 757d9808a9..d63dff5e61 100644
--- a/lib/libunwind/src/UnwindCursor.hpp
+++ b/lib/libunwind/src/UnwindCursor.hpp
@@ -1020,6 +1020,10 @@ private:
int stepWithCompactEncoding(Registers_sparc &) { return UNW_EINVAL; }
#endif
+#if defined(_LIBUNWIND_TARGET_SPARC64)
+ int stepWithCompactEncoding(Registers_sparc64 &) { return UNW_EINVAL; }
+#endif
+
#if defined (_LIBUNWIND_TARGET_RISCV)
int stepWithCompactEncoding(Registers_riscv &) {
return UNW_EINVAL;
@@ -1092,6 +1096,12 @@ private:
bool compactSaysUseDwarf(Registers_sparc &, uint32_t *) const { return true; }
#endif
+#if defined(_LIBUNWIND_TARGET_SPARC64)
+ bool compactSaysUseDwarf(Registers_sparc64 &, uint32_t *) const {
+ return true;
+ }
+#endif
+
#if defined (_LIBUNWIND_TARGET_RISCV)
bool compactSaysUseDwarf(Registers_riscv &, uint32_t *) const {
return true;
@@ -1170,6 +1180,12 @@ private:
compact_unwind_encoding_t dwarfEncoding(Registers_sparc &) const { return 0; }
#endif
+#if defined(_LIBUNWIND_TARGET_SPARC64)
+ compact_unwind_encoding_t dwarfEncoding(Registers_sparc64 &) const {
+ return 0;
+ }
+#endif
+
#if defined (_LIBUNWIND_TARGET_RISCV)
compact_unwind_encoding_t dwarfEncoding(Registers_riscv &) const {
return 0;
diff --git a/lib/libunwind/src/UnwindRegistersRestore.S b/lib/libunwind/src/UnwindRegistersRestore.S
index d8bf1adee4..bbdabcc355 100644
--- a/lib/libunwind/src/UnwindRegistersRestore.S
+++ b/lib/libunwind/src/UnwindRegistersRestore.S
@@ -1050,6 +1050,53 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
ld $4, (8 * 4)($4)
.set pop
+#elif defined(__sparc__) && defined(__arch64__)
+
+DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
+//
+// void libunwind::Registers_sparc64::jumpto()
+//
+// On entry:
+// thread_state pointer is in %o0
+//
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g6, #scratch
+ .register %g7, #scratch
+ flushw
+ ldx [%o0 + 0x08], %g1
+ ldx [%o0 + 0x10], %g2
+ ldx [%o0 + 0x18], %g3
+ ldx [%o0 + 0x20], %g4
+ ldx [%o0 + 0x28], %g5
+ ldx [%o0 + 0x30], %g6
+ ldx [%o0 + 0x38], %g7
+ ldx [%o0 + 0x48], %o1
+ ldx [%o0 + 0x50], %o2
+ ldx [%o0 + 0x58], %o3
+ ldx [%o0 + 0x60], %o4
+ ldx [%o0 + 0x68], %o5
+ ldx [%o0 + 0x70], %o6
+ ldx [%o0 + 0x78], %o7
+ ldx [%o0 + 0x80], %l0
+ ldx [%o0 + 0x88], %l1
+ ldx [%o0 + 0x90], %l2
+ ldx [%o0 + 0x98], %l3
+ ldx [%o0 + 0xa0], %l4
+ ldx [%o0 + 0xa8], %l5
+ ldx [%o0 + 0xb0], %l6
+ ldx [%o0 + 0xb8], %l7
+ ldx [%o0 + 0xc0], %i0
+ ldx [%o0 + 0xc8], %i1
+ ldx [%o0 + 0xd0], %i2
+ ldx [%o0 + 0xd8], %i3
+ ldx [%o0 + 0xe0], %i4
+ ldx [%o0 + 0xe8], %i5
+ ldx [%o0 + 0xf0], %i6
+ ldx [%o0 + 0xf8], %i7
+ jmp %o7
+ ldx [%o0 + 0x40], %o0
+
#elif defined(__sparc__)
//
diff --git a/lib/libunwind/src/UnwindRegistersSave.S b/lib/libunwind/src/UnwindRegistersSave.S
index f66dc532c2..b6fca2be27 100644
--- a/lib/libunwind/src/UnwindRegistersSave.S
+++ b/lib/libunwind/src/UnwindRegistersSave.S
@@ -766,7 +766,7 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
@
@ On entry:
@ thread_state pointer is in r0
-@
+@
@ Per EHABI #4.7 this only saves the core integer registers.
@ EHABI #7.4.5 notes that in general all VRS registers should be restored
@ however this is very hard to do for VFP registers because it is unknown
@@ -996,6 +996,64 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
jumpr r31
+#elif defined(__sparc__) && defined(__arch64__)
+
+#
+# extern int __unw_getcontext(unw_context_t* thread_state)
+#
+# On entry:
+# thread_state pointer is in %o0
+#
+DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g6, #scratch
+ .register %g7, #scratch
+ stx %g1, [%o0 + 0x08]
+ stx %g2, [%o0 + 0x10]
+ stx %g3, [%o0 + 0x18]
+ stx %g4, [%o0 + 0x20]
+ stx %g5, [%o0 + 0x28]
+ stx %g6, [%o0 + 0x30]
+ stx %g7, [%o0 + 0x38]
+ stx %o0, [%o0 + 0x40]
+ stx %o1, [%o0 + 0x48]
+ stx %o2, [%o0 + 0x50]
+ stx %o3, [%o0 + 0x58]
+ stx %o4, [%o0 + 0x60]
+ stx %o5, [%o0 + 0x68]
+ stx %o6, [%o0 + 0x70]
+ stx %o7, [%o0 + 0x78]
+ stx %l0, [%o0 + 0x80]
+ stx %l1, [%o0 + 0x88]
+ stx %l2, [%o0 + 0x90]
+ stx %l3, [%o0 + 0x98]
+ stx %l4, [%o0 + 0xa0]
+ stx %l5, [%o0 + 0xa8]
+ stx %l6, [%o0 + 0xb0]
+ stx %l7, [%o0 + 0xb8]
+ stx %i0, [%o0 + 0xc0]
+ stx %i1, [%o0 + 0xc8]
+ stx %i2, [%o0 + 0xd0]
+ stx %i3, [%o0 + 0xd8]
+ stx %i4, [%o0 + 0xe0]
+ stx %i5, [%o0 + 0xe8]
+ stx %i6, [%o0 + 0xf0]
+ stx %i7, [%o0 + 0xf8]
+
+ # save StackGhost cookie
+ mov %i7, %g4
+ save %sp, -176, %sp
+ # register window flush necessary even without StackGhost
+ flushw
+ restore
+ ldx [%sp + 2047 + 0x78], %g5
+ xor %g4, %g5, %g4
+ stx %g4, [%o0 + 0x100]
+ retl
+ # return UNW_ESUCCESS
+ clr %o0
+
#elif defined(__sparc__)
#
diff --git a/lib/libunwind/src/config.h b/lib/libunwind/src/config.h
index 2ab9d2f5e0..f25b390c84 100644
--- a/lib/libunwind/src/config.h
+++ b/lib/libunwind/src/config.h
@@ -109,13 +109,10 @@
#define _LIBUNWIND_SUPPORT_FRAME_APIS
#endif
-#if defined(__i386__) || defined(__x86_64__) || \
- defined(__ppc__) || defined(__ppc64__) || defined(__powerpc64__) || \
- (!defined(__APPLE__) && defined(__arm__)) || \
- defined(__aarch64__) || \
- defined(__mips__) || \
- defined(__riscv) || \
- defined(__hexagon__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
+ (!defined(__APPLE__) && defined(__arm__)) || defined(__aarch64__) || \
+ defined(__mips__) || defined(__riscv) || defined(__hexagon__) || \
+ defined(__sparc__)
#if !defined(_LIBUNWIND_BUILD_SJLJ_APIS)
#define _LIBUNWIND_BUILD_ZERO_COST_APIS
#endif
diff --git a/lib/libunwind/src/libunwind.cpp b/lib/libunwind/src/libunwind.cpp
index dca403c863..ac01246e86 100644
--- a/lib/libunwind/src/libunwind.cpp
+++ b/lib/libunwind/src/libunwind.cpp
@@ -69,6 +69,8 @@ _LIBUNWIND_HIDDEN int __unw_init_local(unw_cursor_t *cursor,
# define REGISTER_KIND Registers_mips_newabi
#elif defined(__mips__)
# warning The MIPS architecture is not supported with this ABI and environment!
+#elif defined(__sparc__) && defined(__arch64__)
+# define REGISTER_KIND Registers_sparc64
#elif defined(__sparc__)
# define REGISTER_KIND Registers_sparc
#elif defined(__riscv)
From 2a73700c0fc177fb8781b277a6ba5017dce875b9 Mon Sep 17 00:00:00 2001
From: Tw
Date: Sat, 29 Jan 2022 17:44:13 +0800
Subject: [PATCH 0166/2031] Fix preadv/pwritev bug on 64bit platform
Signed-off-by: Tw
---
lib/std/os/linux.zig | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index c1591f7ea1..cc9a8bc85c 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -440,26 +440,30 @@ pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
}
pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: i64) usize {
- const offset_halves = splitValueLE64(offset);
+ const offset_u = @bitCast(u64, offset);
return syscall5(
.preadv,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
- offset_halves[0],
- offset_halves[1],
+ // Kernel expects the offset to be split into the largest natural word-size.
+ // See following link for detail:
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=601cc11d054ae4b5e9b5babec3d8e4667a2cb9b5
+ @truncate(usize, offset_u),
+ if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
);
}
pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: i64, flags: kernel_rwf) usize {
- const offset_halves = splitValue64(offset);
+ const offset_u = @bitCast(u64, offset);
return syscall6(
.preadv2,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
- offset_halves[0],
- offset_halves[1],
+ // See comments in preadv
+ @truncate(usize, offset_u),
+ if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
flags,
);
}
@@ -473,26 +477,28 @@ pub fn writev(fd: i32, iov: [*]const iovec_const, count: usize) usize {
}
pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64) usize {
- const offset_halves = splitValueLE64(offset);
+ const offset_u = @bitCast(u64, offset);
return syscall5(
.pwritev,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
- offset_halves[0],
- offset_halves[1],
+ // See comments in preadv
+ @truncate(usize, offset_u),
+ if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
);
}
pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64, flags: kernel_rwf) usize {
- const offset_halves = splitValue64(offset);
+ const offset_u = @bitCast(u64, offset);
return syscall6(
.pwritev2,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
- offset_halves[0],
- offset_halves[1],
+ // See comments in preadv
+ @truncate(usize, offset_u),
+ if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
flags,
);
}
From 7b72fc6bbc5554643bc27933310899e32783b81b Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Sun, 13 Feb 2022 11:54:37 -0700
Subject: [PATCH 0167/2031] Add `abi_size` parameter to
read/writeTwosComplement
Big-int functions were updated to respect the provided abi_size, rather
than inferring a potentially incorrect abi_size implicitly.
In combination with the convention that any required padding bits are
added on the MSB end, this means that exotic integers can potentially
have a well-defined memory layout.
---
lib/std/math/big/int.zig | 141 +++++++++++++++++-----------------
lib/std/math/big/int_test.zig | 95 ++++++++++++++++++++++-
src/value.zig | 9 ++-
3 files changed, 166 insertions(+), 79 deletions(-)
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 1c6404fb3c..731f4b5456 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -1624,18 +1624,16 @@ pub const Mutable = struct {
}
/// Read the value of `x` from `buffer`
- /// Asserts that `buffer` and `bit_count` are large enough to store the value.
+ /// Asserts that `buffer`, `abi_size`, and `bit_count` are large enough to store the value.
///
- /// For integers with a well-defined layout (e.g. all power-of-two integers), this function
- /// reads from `buffer` as if it were the contents of @ptrCast([]const u8, &x), where the
- /// slice length is taken to be @sizeOf(std.meta.Int(signedness, ))
- ///
- /// For integers with a non-well-defined layout, `buffer` must have been created by
- /// writeTwosComplement.
+ /// The contents of `buffer` are interpreted as if they were the contents of
+ /// @ptrCast(*[abi_size]const u8, &x). Byte ordering is determined by `endian`
+ /// and any required padding bits are expected on the MSB end.
pub fn readTwosComplement(
x: *Mutable,
buffer: []const u8,
bit_count: usize,
+ abi_size: usize,
endian: Endian,
signedness: Signedness,
) void {
@@ -1646,20 +1644,18 @@ pub const Mutable = struct {
return;
}
- // byte_count is the total amount of bytes to read from buffer
- var byte_count = @sizeOf(Limb) * (bit_count / @bitSizeOf(Limb));
- if (bit_count % @bitSizeOf(Limb) != 0) { // Round up to a power-of-two integer <= Limb
- byte_count += (std.math.ceilPowerOfTwoAssert(usize, bit_count % @bitSizeOf(Limb)) + 7) / 8;
- }
-
- const limb_count = calcTwosCompLimbCount(8 * byte_count);
+ // byte_count is our total read size: it cannot exceed abi_size,
+ // but may be less as long as it includes the required bits
+ const limb_count = calcTwosCompLimbCount(bit_count);
+ const byte_count = std.math.min(abi_size, @sizeOf(Limb) * limb_count);
+ assert(8 * byte_count >= bit_count);
// Check whether the input is negative
var positive = true;
if (signedness == .signed) {
var last_byte = switch (endian) {
.Little => ((bit_count + 7) / 8) - 1,
- .Big => byte_count - ((bit_count + 7) / 8),
+ .Big => abi_size - ((bit_count + 7) / 8),
};
const sign_bit = @as(u8, 1) << @intCast(u3, (bit_count - 1) % 8);
@@ -1672,7 +1668,7 @@ pub const Mutable = struct {
while (limb_index < bit_count / @bitSizeOf(Limb)) : (limb_index += 1) {
var buf_index = switch (endian) {
.Little => @sizeOf(Limb) * limb_index,
- .Big => byte_count - (limb_index + 1) * @sizeOf(Limb),
+ .Big => abi_size - (limb_index + 1) * @sizeOf(Limb),
};
const limb_buf = @ptrCast(*const [@sizeOf(Limb)]u8, buffer[buf_index..]);
@@ -1683,32 +1679,34 @@ pub const Mutable = struct {
x.limbs[limb_index] = limb;
}
- // Copy any remaining bytes, using the nearest power-of-two integer that is large enough
- const bits_left = @intCast(Log2Limb, bit_count % @bitSizeOf(Limb));
- if (bits_left != 0) {
- const bytes_read = limb_index * @sizeOf(Limb);
- const bytes_left = byte_count - bytes_read;
- var buffer_left = switch (endian) {
- .Little => buffer[bytes_read..],
- .Big => buffer[0..],
- };
+ // Copy the remaining N bytes (N <= @sizeOf(Limb))
+ var bytes_read = limb_index * @sizeOf(Limb);
+ if (bytes_read != byte_count) {
+ var limb: Limb = 0;
- var limb = @intCast(Limb, blk: {
- // zig fmt: off
- if (bytes_left == 1) break :blk mem.readInt( u8, buffer_left[0.. 1], endian);
- if (bytes_left == 2) break :blk mem.readInt( u16, buffer_left[0.. 2], endian);
- if (bytes_left == 4) break :blk mem.readInt( u32, buffer_left[0.. 4], endian);
- if (bytes_left == 8) break :blk mem.readInt( u64, buffer_left[0.. 8], endian);
- if (bytes_left == 16) break :blk mem.readInt(u128, buffer_left[0..16], endian);
- // zig fmt: on
- unreachable;
- });
+ while (bytes_read != byte_count) {
+ const read_size = std.math.floorPowerOfTwo(usize, byte_count - bytes_read);
+ var int_buffer = switch (endian) {
+ .Little => buffer[bytes_read..],
+ .Big => buffer[(abi_size - bytes_read - read_size)..],
+ };
+ limb |= @intCast(Limb, switch (read_size) {
+ 1 => mem.readInt(u8, int_buffer[0..1], endian),
+ 2 => mem.readInt(u16, int_buffer[0..2], endian),
+ 4 => mem.readInt(u32, int_buffer[0..4], endian),
+ 8 => mem.readInt(u64, int_buffer[0..8], endian),
+ 16 => mem.readInt(u128, int_buffer[0..16], endian),
+ else => unreachable,
+ }) << @intCast(Log2Limb, 8 * (bytes_read % @sizeOf(Limb)));
+ bytes_read += read_size;
+ }
// 2's complement (bitwise not, then add carry bit)
if (!positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb);
// Mask off any unused bits
- const mask = (@as(Limb, 1) << bits_left) -% 1; // 0b0..01..1 with (bits_left) trailing ones
+ const valid_bits = @intCast(Log2Limb, bit_count % @bitSizeOf(Limb));
+ const mask = (@as(Limb, 1) << valid_bits) -% 1; // 0b0..01..1 with (valid_bits) trailing ones
limb &= mask;
x.limbs[limb_count - 1] = limb;
@@ -2076,21 +2074,16 @@ pub const Const = struct {
}
/// Write the value of `x` into `buffer`
- /// Asserts that `buffer` and `bit_count` are large enough to store the value.
+ /// Asserts that `buffer`, `abi_size`, and `bit_count` are large enough to store the value.
///
- /// For integers with a well-defined layout (e.g. all power-of-two integers), this function
- /// can be thought of as writing to `buffer` the contents of @ptrCast([]const u8, &x),
- /// where the slice length is taken to be @sizeOf(std.meta.Int(_,))
- ///
- /// For integers with a non-well-defined layout, the only requirement is that readTwosComplement
- /// on the same buffer creates an equivalent big integer.
- pub fn writeTwosComplement(x: Const, buffer: []u8, bit_count: usize, endian: Endian) void {
- if (bit_count == 0) return;
+ /// `buffer` is filled so that its contents match what would be observed via
+ /// @ptrCast(*[abi_size]const u8, &x). Byte ordering is determined by `endian`,
+ /// and any required padding bits are added on the MSB end.
+ pub fn writeTwosComplement(x: Const, buffer: []u8, bit_count: usize, abi_size: usize, endian: Endian) void {
- var byte_count = @sizeOf(Limb) * (bit_count / @bitSizeOf(Limb));
- if (bit_count % @bitSizeOf(Limb) != 0) {
- byte_count += (std.math.ceilPowerOfTwoAssert(usize, bit_count % @bitSizeOf(Limb)) + 7) / 8;
- }
+ // byte_count is our total write size
+ const byte_count = abi_size;
+ assert(8 * byte_count >= bit_count);
assert(buffer.len >= byte_count);
assert(x.fitsInTwosComp(if (x.positive) .unsigned else .signed, bit_count));
@@ -2100,7 +2093,7 @@ pub const Const = struct {
while (limb_index < byte_count / @sizeOf(Limb)) : (limb_index += 1) {
var buf_index = switch (endian) {
.Little => @sizeOf(Limb) * limb_index,
- .Big => byte_count - (limb_index + 1) * @sizeOf(Limb),
+ .Big => abi_size - (limb_index + 1) * @sizeOf(Limb),
};
var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
@@ -2111,32 +2104,36 @@ pub const Const = struct {
mem.writeInt(Limb, limb_buf, limb, endian);
}
- // Copy any remaining bytes
- if (byte_count % @sizeOf(Limb) != 0) {
- const bytes_read = limb_index * @sizeOf(Limb);
- const bytes_left = byte_count - bytes_read;
- var buffer_left = switch (endian) {
- .Little => buffer[bytes_read..],
- .Big => buffer[0..],
- };
-
+ // Copy the remaining N bytes (N < @sizeOf(Limb))
+ var bytes_written = limb_index * @sizeOf(Limb);
+ if (bytes_written != byte_count) {
var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
// 2's complement (bitwise not, then add carry bit)
if (!x.positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb);
- if (bytes_left == 1) {
- mem.writeInt(u8, buffer_left[0..1], @truncate(u8, limb), endian);
- } else if (@sizeOf(Limb) > 1 and bytes_left == 2) {
- mem.writeInt(u16, buffer_left[0..2], @truncate(u16, limb), endian);
- } else if (@sizeOf(Limb) > 2 and bytes_left == 4) {
- mem.writeInt(u32, buffer_left[0..4], @truncate(u32, limb), endian);
- } else if (@sizeOf(Limb) > 4 and bytes_left == 8) {
- mem.writeInt(u64, buffer_left[0..8], @truncate(u64, limb), endian);
- } else if (@sizeOf(Limb) > 8 and bytes_left == 16) {
- mem.writeInt(u128, buffer_left[0..16], @truncate(u128, limb), endian);
- } else if (@sizeOf(Limb) > 16) {
- @compileError("@sizeOf(Limb) exceeded supported range");
- } else unreachable;
+ while (bytes_written != byte_count) {
+ const write_size = std.math.floorPowerOfTwo(usize, byte_count - bytes_written);
+ var int_buffer = switch (endian) {
+ .Little => buffer[bytes_written..],
+ .Big => buffer[(abi_size - bytes_written - write_size)..],
+ };
+
+ if (write_size == 1) {
+ mem.writeInt(u8, int_buffer[0..1], @truncate(u8, limb), endian);
+ } else if (@sizeOf(Limb) >= 2 and write_size == 2) {
+ mem.writeInt(u16, int_buffer[0..2], @truncate(u16, limb), endian);
+ } else if (@sizeOf(Limb) >= 4 and write_size == 4) {
+ mem.writeInt(u32, int_buffer[0..4], @truncate(u32, limb), endian);
+ } else if (@sizeOf(Limb) >= 8 and write_size == 8) {
+ mem.writeInt(u64, int_buffer[0..8], @truncate(u64, limb), endian);
+ } else if (@sizeOf(Limb) >= 16 and write_size == 16) {
+ mem.writeInt(u128, int_buffer[0..16], @truncate(u128, limb), endian);
+ } else if (@sizeOf(Limb) >= 32) {
+ @compileError("@sizeOf(Limb) exceeded supported range");
+ } else unreachable;
+ limb >>= @intCast(Log2Limb, 8 * write_size);
+ bytes_written += write_size;
+ }
}
}
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index f6f210f56c..69600425a4 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -2498,16 +2498,103 @@ test "big int conversion read/write twos complement" {
defer testing.allocator.free(buffer1);
const endians = [_]std.builtin.Endian{ .Little, .Big };
+ const abi_size = 64;
for (endians) |endian| {
// Writing to buffer and back should not change anything
- a.toConst().writeTwosComplement(buffer1, 493, endian);
- m.readTwosComplement(buffer1, 493, endian, .unsigned);
+ a.toConst().writeTwosComplement(buffer1, 493, abi_size, endian);
+ m.readTwosComplement(buffer1, 493, abi_size, endian, .unsigned);
try testing.expect(m.toConst().order(a.toConst()) == .eq);
// Equivalent to @bitCast(i493, @as(u493, intMax(u493))
- a.toConst().writeTwosComplement(buffer1, 493, endian);
- m.readTwosComplement(buffer1, 493, endian, .signed);
+ a.toConst().writeTwosComplement(buffer1, 493, abi_size, endian);
+ m.readTwosComplement(buffer1, 493, abi_size, endian, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-1) == .eq);
}
}
+
+test "big int conversion read twos complement with padding" {
+ var a = try Managed.initSet(testing.allocator, 0x01_02030405_06070809_0a0b0c0d);
+ defer a.deinit();
+
+ var buffer1 = try testing.allocator.alloc(u8, 16);
+ defer testing.allocator.free(buffer1);
+ @memset(buffer1.ptr, 0xaa, buffer1.len);
+
+ // writeTwosComplement:
+ // (1) should not write beyond buffer[0..abi_size]
+ // (2) should correctly order bytes based on the provided endianness
+ // (3) should sign-extend any bits from bit_count to 8 * abi_size
+
+ var bit_count: usize = 12 * 8 + 1;
+ a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0xaa, 0xaa, 0xaa }));
+ a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xaa, 0xaa, 0xaa }));
+ a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0, 0x0, 0x0 }));
+ a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }));
+
+ @memset(buffer1.ptr, 0xaa, buffer1.len);
+ try a.set(-0x01_02030405_06070809_0a0b0c0d);
+ bit_count = 12 * 8 + 2;
+
+ a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xaa, 0xaa, 0xaa }));
+ a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3, 0xaa, 0xaa, 0xaa }));
+ a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff }));
+ a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 }));
+}
+
+test "big int conversion write twos complement with padding" {
+ var a = try Managed.initSet(testing.allocator, 0x01_ffffffff_ffffffff_ffffffff);
+ defer a.deinit();
+
+ var m = a.toMutable();
+
+ // readTwosComplement:
+ // (1) should not read beyond buffer[0..abi_size]
+ // (2) should correctly interpret bytes based on the provided endianness
+ // (3) should ignore any bits from bit_count to 8 * abi_size
+
+ var bit_count: usize = 12 * 8 + 1;
+ var buffer: []const u8 = undefined;
+
+ buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xb };
+ m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ buffer = &[_]u8{ 0xb, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
+ m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xab, 0xaa, 0xaa, 0xaa };
+ m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xab, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
+ m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ bit_count = 12 * 8 + 2;
+
+ buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02 };
+ m.readTwosComplement(buffer, bit_count, 13, .Little, .signed);
+ try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ buffer = &[_]u8{ 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 };
+ m.readTwosComplement(buffer, bit_count, 13, .Big, .signed);
+ try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02, 0xaa, 0xaa, 0xaa };
+ m.readTwosComplement(buffer, bit_count, 16, .Little, .signed);
+ try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 };
+ m.readTwosComplement(buffer, bit_count, 16, .Big, .signed);
+ try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
+}
diff --git a/src/value.zig b/src/value.zig
index 2018eb3df3..a3dd6501e4 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1046,7 +1046,8 @@ pub const Value = extern union {
var bigint_buffer: BigIntSpace = undefined;
const bigint = val.toBigInt(&bigint_buffer);
const bits = ty.intInfo(target).bits;
- bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
+ const abi_size = ty.abiSize(target);
+ bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
},
.Enum => {
var enum_buffer: Payload.U64 = undefined;
@@ -1054,7 +1055,8 @@ pub const Value = extern union {
var bigint_buffer: BigIntSpace = undefined;
const bigint = int_val.toBigInt(&bigint_buffer);
const bits = ty.intInfo(target).bits;
- bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
+ const abi_size = ty.abiSize(target);
+ bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
},
.Float => switch (ty.floatBits(target)) {
16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer),
@@ -1096,8 +1098,9 @@ pub const Value = extern union {
const Limb = std.math.big.Limb;
const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
const limbs_buffer = try arena.alloc(Limb, limb_count);
+ const abi_size = ty.abiSize(target);
var bigint = BigIntMutable.init(limbs_buffer, 0);
- bigint.readTwosComplement(buffer, int_info.bits, endian, int_info.signedness);
+ bigint.readTwosComplement(buffer, int_info.bits, abi_size, endian, int_info.signedness);
return fromBigInt(arena, bigint.toConst());
},
.Float => switch (ty.floatBits(target)) {
From c586e3ba1bbc7784aee22bf96f69b8003611112a Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Sun, 13 Feb 2022 11:59:14 -0700
Subject: [PATCH 0168/2031] Add additional tests for `@bitCast`
---
lib/std/math/big/int_test.zig | 91 ++++++++++++++++++++++
test/behavior/bitcast.zig | 140 +++++++++-------------------------
2 files changed, 126 insertions(+), 105 deletions(-)
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 69600425a4..e7469326e4 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -2550,6 +2550,43 @@ test "big int conversion read twos complement with padding" {
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 }));
}
+test "big int write twos complement +/- zero" {
+ var a = try Managed.initSet(testing.allocator, 0x0);
+ defer a.deinit();
+ var m = a.toMutable();
+
+ var buffer1 = try testing.allocator.alloc(u8, 16);
+ defer testing.allocator.free(buffer1);
+ @memset(buffer1.ptr, 0xaa, buffer1.len);
+
+ var bit_count: usize = 0;
+
+ // Test zero
+
+ m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
+ m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
+ m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
+ m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
+
+ @memset(buffer1.ptr, 0xaa, buffer1.len);
+ m.positive = false;
+
+ // Test negative zero
+
+ m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
+ m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
+ m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
+ m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
+}
+
test "big int conversion write twos complement with padding" {
var a = try Managed.initSet(testing.allocator, 0x01_ffffffff_ffffffff_ffffffff);
defer a.deinit();
@@ -2564,6 +2601,8 @@ test "big int conversion write twos complement with padding" {
var bit_count: usize = 12 * 8 + 1;
var buffer: []const u8 = undefined;
+ // Test 0x01_02030405_06070809_0a0b0c0d
+
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xb };
m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
@@ -2582,6 +2621,8 @@ test "big int conversion write twos complement with padding" {
bit_count = 12 * 8 + 2;
+ // Test -0x01_02030405_06070809_0a0b0c0d
+
buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02 };
m.readTwosComplement(buffer, bit_count, 13, .Little, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
@@ -2597,4 +2638,54 @@ test "big int conversion write twos complement with padding" {
buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 };
m.readTwosComplement(buffer, bit_count, 16, .Big, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
+
+ // Test 0
+
+ buffer = &([_]u8{0} ** 16);
+ m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+
+ bit_count = 0;
+ buffer = &([_]u8{0xaa} ** 16);
+ m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+}
+
+test "big int conversion write twos complement zero" {
+ var a = try Managed.initSet(testing.allocator, 0x01_ffffffff_ffffffff_ffffffff);
+ defer a.deinit();
+
+ var m = a.toMutable();
+
+ // readTwosComplement:
+ // (1) should not read beyond buffer[0..abi_size]
+ // (2) should correctly interpret bytes based on the provided endianness
+ // (3) should ignore any bits from bit_count to 8 * abi_size
+
+ var bit_count: usize = 12 * 8 + 1;
+ var buffer: []const u8 = undefined;
+
+ buffer = &([_]u8{0} ** 13);
+ m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+
+ buffer = &([_]u8{0} ** 16);
+ m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
+ m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
}
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 43d6524a4e..59a16c5fc9 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -6,123 +6,53 @@ const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const native_endian = builtin.target.cpu.arch.endian();
-test "@bitCast i32 -> u32" {
- try testBitCast_i32_u32();
- comptime try testBitCast_i32_u32();
+test "@bitCast iX -> uX" {
+ const bit_values = [_]usize{ 8, 16, 32, 64 };
+
+ inline for (bit_values) |bits| {
+ try testBitCast(bits);
+ comptime try testBitCast(bits);
+ }
}
-fn testBitCast_i32_u32() !void {
- try expect(conv_i32(-1) == maxInt(u32));
- try expect(conv_u32(maxInt(u32)) == -1);
- try expect(conv_u32(0x8000_0000) == minInt(i32));
- try expect(conv_i32(minInt(i32)) == 0x8000_0000);
-}
-
-fn conv_i32(x: i32) u32 {
- return @bitCast(u32, x);
-}
-fn conv_u32(x: u32) i32 {
- return @bitCast(i32, x);
-}
-
-test "@bitCast i48 -> u48" {
- try testBitCast_i48_u48();
- comptime try testBitCast_i48_u48();
-}
-
-fn testBitCast_i48_u48() !void {
+test "@bitCast iX -> uX exotic integers" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- try expect(conv_i48(-1) == maxInt(u48));
- try expect(conv_u48(maxInt(u48)) == -1);
- try expect(conv_u48(0x8000_0000_0000) == minInt(i48));
- try expect(conv_i48(minInt(i48)) == 0x8000_0000_0000);
+ const bit_values = [_]usize{ 1, 48, 27, 512, 493, 293, 125, 204, 112 };
+
+ inline for (bit_values) |bits| {
+ try testBitCast(bits);
+ comptime try testBitCast(bits);
+ }
}
-fn conv_i48(x: i48) u48 {
- return @bitCast(u48, x);
+fn testBitCast(comptime N: usize) !void {
+ const iN = std.meta.Int(.signed, N);
+ const uN = std.meta.Int(.unsigned, N);
+
+ try expect(conv_iN(N, -1) == maxInt(uN));
+ try expect(conv_uN(N, maxInt(uN)) == -1);
+
+ try expect(conv_iN(N, maxInt(iN)) == maxInt(iN));
+ try expect(conv_uN(N, maxInt(iN)) == maxInt(iN));
+
+ try expect(conv_uN(N, 1 << (N - 1)) == minInt(iN));
+ try expect(conv_iN(N, minInt(iN)) == (1 << (N - 1)));
+
+ try expect(conv_uN(N, 0) == 0);
+ try expect(conv_iN(N, 0) == 0);
+
+ try expect(conv_iN(N, -0) == 0);
}
-fn conv_u48(x: u48) i48 {
- return @bitCast(i48, x);
+fn conv_iN(comptime N: usize, x: std.meta.Int(.signed, N)) std.meta.Int(.unsigned, N) {
+ return @bitCast(std.meta.Int(.unsigned, N), x);
}
-test "@bitCast i27 -> u27" {
- try testBitCast_i27_u27();
- comptime try testBitCast_i27_u27();
-}
-
-fn testBitCast_i27_u27() !void {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
- try expect(conv_i27(-1) == maxInt(u27));
- try expect(conv_u27(maxInt(u27)) == -1);
- try expect(conv_u27(0x400_0000) == minInt(i27));
- try expect(conv_i27(minInt(i27)) == 0x400_0000);
-}
-
-fn conv_i27(x: i27) u27 {
- return @bitCast(u27, x);
-}
-
-fn conv_u27(x: u27) i27 {
- return @bitCast(i27, x);
-}
-
-test "@bitCast i512 -> u512" {
- try testBitCast_i512_u512();
- comptime try testBitCast_i512_u512();
-}
-
-fn testBitCast_i512_u512() !void {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
- try expect(conv_i512(-1) == maxInt(u512));
- try expect(conv_u512(maxInt(u512)) == -1);
- try expect(conv_u512(@as(u512, 1) << 511) == minInt(i512));
- try expect(conv_i512(minInt(i512)) == (@as(u512, 1) << 511));
-}
-
-fn conv_i512(x: i512) u512 {
- return @bitCast(u512, x);
-}
-
-fn conv_u512(x: u512) i512 {
- return @bitCast(i512, x);
-}
-
-test "bitcast result to _" {
- _ = @bitCast(u8, @as(i8, 1));
-}
-
-test "@bitCast i493 -> u493" {
- try testBitCast_i493_u493();
- comptime try testBitCast_i493_u493();
-}
-
-fn testBitCast_i493_u493() !void {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
- try expect(conv_i493(-1) == maxInt(u493));
- try expect(conv_u493(maxInt(u493)) == -1);
- try expect(conv_u493(@as(u493, 1) << 492) == minInt(i493));
- try expect(conv_i493(minInt(i493)) == (@as(u493, 1) << 492));
-}
-
-fn conv_i493(x: i493) u493 {
- return @bitCast(u493, x);
-}
-
-fn conv_u493(x: u493) i493 {
- return @bitCast(i493, x);
+fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signed, N) {
+ return @bitCast(std.meta.Int(.signed, N), x);
}
test "nested bitcast" {
From 45aed7171c5cafb06671714ac1b15fdcf056d040 Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Sun, 13 Feb 2022 13:01:55 -0700
Subject: [PATCH 0169/2031] Skip 8/16-bit `@bitCast` test for wasm
---
test/behavior/bitcast.zig | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 59a16c5fc9..1bf33af57b 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -6,8 +6,18 @@ const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const native_endian = builtin.target.cpu.arch.endian();
-test "@bitCast iX -> uX" {
- const bit_values = [_]usize{ 8, 16, 32, 64 };
+test "@bitCast iX -> uX (32, 64)" {
+ const bit_values = [_]usize{ 32, 64 };
+
+ inline for (bit_values) |bits| {
+ try testBitCast(bits);
+ comptime try testBitCast(bits);
+ }
+}
+
+test "@bitCast iX -> uX (8, 16, 128)" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+ const bit_values = [_]usize{ 8, 16, 128 };
inline for (bit_values) |bits| {
try testBitCast(bits);
From f23005eba7d323d3f26e90972fc042f4d2002df7 Mon Sep 17 00:00:00 2001
From: ominitay <37453713+ominitay@users.noreply.github.com>
Date: Sat, 8 Jan 2022 21:14:22 +0000
Subject: [PATCH 0170/2031] std.c.darwin.Stat: use timespec
Uses timespec for times in `Stat` instead of two `isize` fields per time. This matches the header file.
---
lib/std/c/darwin.zig | 27 +++++++--------------------
1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index f4ca9cd6dd..3f5b8b340a 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -372,14 +372,10 @@ pub const Stat = extern struct {
uid: uid_t,
gid: gid_t,
rdev: i32,
- atimesec: isize,
- atimensec: isize,
- mtimesec: isize,
- mtimensec: isize,
- ctimesec: isize,
- ctimensec: isize,
- birthtimesec: isize,
- birthtimensec: isize,
+ atimespec: timespec,
+ mtimespec: timespec,
+ ctimespec: timespec,
+ birthtimespec: timespec,
size: off_t,
blocks: i64,
blksize: i32,
@@ -389,24 +385,15 @@ pub const Stat = extern struct {
qspare: [2]i64,
pub fn atime(self: @This()) timespec {
- return timespec{
- .tv_sec = self.atimesec,
- .tv_nsec = self.atimensec,
- };
+ return self.atimespec;
}
pub fn mtime(self: @This()) timespec {
- return timespec{
- .tv_sec = self.mtimesec,
- .tv_nsec = self.mtimensec,
- };
+ return self.mtimespec;
}
pub fn ctime(self: @This()) timespec {
- return timespec{
- .tv_sec = self.ctimesec,
- .tv_nsec = self.ctimensec,
- };
+ return self.ctimespec;
}
};
From 3dd3c5063b1ab939e41461d0c3a3318708d9f76f Mon Sep 17 00:00:00 2001
From: ominitay <37453713+ominitay@users.noreply.github.com>
Date: Sat, 8 Jan 2022 21:16:26 +0000
Subject: [PATCH 0171/2031] std.c.haiku: move Stat.crtime to Stat.birthtime
---
lib/std/c/haiku.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig
index 5997855ea3..176d53e6ae 100644
--- a/lib/std/c/haiku.zig
+++ b/lib/std/c/haiku.zig
@@ -226,7 +226,7 @@ pub const Stat = extern struct {
pub fn ctime(self: @This()) timespec {
return self.ctim;
}
- pub fn crtime(self: @This()) timespec {
+ pub fn birthtime(self: @This()) timespec {
return self.crtim;
}
};
From d978fdaa67f087cd17a122fec9ce65eabf9053ea Mon Sep 17 00:00:00 2001
From: ominitay <37453713+ominitay@users.noreply.github.com>
Date: Sat, 8 Jan 2022 21:18:42 +0000
Subject: [PATCH 0172/2031] std.c.Wasi.Stat: use timespec
---
lib/std/c/wasi.zig | 24 ++++++------------------
lib/std/os/wasi.zig | 2 +-
2 files changed, 7 insertions(+), 19 deletions(-)
diff --git a/lib/std/c/wasi.zig b/lib/std/c/wasi.zig
index 9c9148a783..c3635784dd 100644
--- a/lib/std/c/wasi.zig
+++ b/lib/std/c/wasi.zig
@@ -41,32 +41,20 @@ pub const Stat = extern struct {
blksize: i32,
blocks: i64,
- atimesec: time_t,
- atimensec: isize,
- mtimesec: time_t,
- mtimensec: isize,
- ctimesec: time_t,
- ctimensec: isize,
+ atim: timespec,
+ mtim: timespec,
+ ctim: timespec,
pub fn atime(self: @This()) timespec {
- return timespec{
- .tv_sec = self.atimesec,
- .tv_nsec = self.atimensec,
- };
+ return self.atim;
}
pub fn mtime(self: @This()) timespec {
- return timespec{
- .tv_sec = self.mtimesec,
- .tv_nsec = self.mtimensec,
- };
+ return self.mtim;
}
pub fn ctime(self: @This()) timespec {
- return timespec{
- .tv_sec = self.ctimesec,
- .tv_nsec = self.ctimensec,
- };
+ return self.ctim;
}
};
diff --git a/lib/std/os/wasi.zig b/lib/std/os/wasi.zig
index 029deca3fd..0b2538cb88 100644
--- a/lib/std/os/wasi.zig
+++ b/lib/std/os/wasi.zig
@@ -88,7 +88,7 @@ pub const mode_t = u32;
pub const time_t = i64; // match https://github.com/CraneStation/wasi-libc
-pub const timespec = struct {
+pub const timespec = extern struct {
tv_sec: time_t,
tv_nsec: isize,
From b2610649fcf43c678fd81f3bef71f3ba42ee3606 Mon Sep 17 00:00:00 2001
From: ominitay <37453713+ominitay@users.noreply.github.com>
Date: Sat, 8 Jan 2022 21:19:35 +0000
Subject: [PATCH 0173/2031] std.c.*: add birthtime function to Stat
Adds a birthtime function to the `Stat` structs of Unices which support this. This is done to match the `atime`, `mtime`, and `ctime` functions.
---
lib/std/c/darwin.zig | 4 ++++
lib/std/c/freebsd.zig | 4 ++++
lib/std/c/netbsd.zig | 4 ++++
lib/std/c/openbsd.zig | 4 ++++
4 files changed, 16 insertions(+)
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 3f5b8b340a..2d72f69024 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -395,6 +395,10 @@ pub const Stat = extern struct {
pub fn ctime(self: @This()) timespec {
return self.ctimespec;
}
+
+ pub fn birthtime(self: @This()) timespec {
+ return self.birthtimespec;
+ }
};
pub const timespec = extern struct {
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index a19ecd3bac..10ab830ab0 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -283,6 +283,10 @@ pub const Stat = extern struct {
pub fn ctime(self: @This()) timespec {
return self.ctim;
}
+
+ pub fn birthtime(self: @This()) timespec {
+ return self.birthtim;
+ }
};
pub const timespec = extern struct {
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index 4fbc7594b0..17c3bfd83c 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -312,6 +312,10 @@ pub const Stat = extern struct {
pub fn ctime(self: @This()) timespec {
return self.ctim;
}
+
+ pub fn birthtime(self: @This()) timespec {
+ return self.birthtim;
+ }
};
pub const timespec = extern struct {
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index 6ba11e8e5a..4ee3351023 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -235,6 +235,10 @@ pub const Stat = extern struct {
pub fn ctime(self: @This()) timespec {
return self.ctim;
}
+
+ pub fn birthtime(self: @This()) timespec {
+ return self.birthtim;
+ }
};
pub const timespec = extern struct {
From 11b4cc589c85b6330824ac9a64bfb0829cc1adb0 Mon Sep 17 00:00:00 2001
From: ominitay <37453713+ominitay@users.noreply.github.com>
Date: Sat, 8 Jan 2022 21:22:23 +0000
Subject: [PATCH 0174/2031] std.fs: Implement cross-platform metadata API
Implements a cross-platform metadata API, aiming to reduce unnecessary Unix-dependence of the `std.fs` API. Presently, all OSes besides Windows are treated as Unix; this is likely the best way to treat things by default, instead of explicitly listing each Unix-like OS.
Platform-specific operations are not provided by `File.Metadata`, and instead are to be accessed from `File.Metadata.inner`.
Adds:
- File.setPermissions() : Sets the permissions of a file according to a `Permissions` struct (not available on WASI)
- File.Permissions : A cross-platform representation of file permissions
- Permissions.readOnly() : Returns whether the file is read-only
- Permissions.setReadOnly() : Sets whether the file is read-only
- Permissions.unixSet() : Sets permissions for a class (UNIX-only)
- Permissions.unixGet() : Checks a permission for a class (UNIX-only)
- Permissions.unixNew() : Returns a new Permissions struct to represent the passed mode (UNIX-only)
- File.Metadata : A cross-platform representation of file metadata
- Metadata.size() : Returns the size of a file
- Metadata.permissions() : Returns a `Permissions` struct, representing permissions on the file
- Metadata.kind() : Returns the `Kind` of the file
- Metadata.accessed() : Returns the time the file was last accessed
- Metadata.modified() : Returns the time the file was last modified
- Metadata.created() : Returns the time the file was created (this is an optional, as the underlying filesystem or OS may not support this)
Methods of `File.Metadata` are also available for the below, so I won't repeat myself
The below may be used for platform-specific functionality
- File.MetadataUnix : The internal implementation of `File.Metadata` on Unices
- File.MetadataLinux : The internal implementation of `File.Metadata` on Linux
- File.MetadataWindows : The implementation of `File.Metadata` on Windows
---
lib/std/fs.zig | 25 +++
lib/std/fs/file.zig | 479 ++++++++++++++++++++++++++++++++++++++++++++
lib/std/fs/test.zig | 76 +++++++
3 files changed, 580 insertions(+)
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index f7818ea68b..7b577b1b62 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -2231,6 +2231,31 @@ pub const Dir = struct {
}
pub const ChownError = File.ChownError;
+
+ const Permissions = File.Permissions;
+ pub const SetPermissionsError = File.SetPermissionsError;
+
+ /// Sets permissions according to the provided `Permissions` struct.
+ /// This method is *NOT* available on WASI
+ pub fn setPermissions(self: Dir, permissions: Permissions) SetPermissionsError!void {
+ const file: File = .{
+ .handle = self.fd,
+ .capable_io_mode = .blocking,
+ };
+ try file.setPermissions(permissions);
+ }
+
+ const Metadata = File.Metadata;
+ pub const MetadataError = File.MetadataError;
+
+ /// Returns a `Metadata` struct, representing the permissions on the directory
+ pub fn metadata(self: Dir) MetadataError!Metadata {
+ const file: File = .{
+ .handle = self.fd,
+ .capable_io_mode = .blocking,
+ };
+ return try file.metadata();
+ }
};
/// Returns a handle to the current working directory. It is not opened with iteration capability.
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index ff71c82d2b..079d5905a1 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -398,6 +398,485 @@ pub const File = struct {
try os.fchown(self.handle, owner, group);
}
+ /// Cross-platform representation of permissions on a file.
+ /// The `readOnly` and `setReadOnly` are the only methods available across all platforms.
+ /// Platform-specific functionality is available through the `inner` field.
+ pub const Permissions = struct {
+ /// You may use the `inner` field to use platform-specific functionality
+ inner: switch (builtin.os.tag) {
+ .windows => PermissionsWindows,
+ else => PermissionsUnix,
+ },
+
+ const Self = @This();
+
+ /// Returns `true` if permissions represent an unwritable file.
+ /// On Unix, `true` is returned only if no class has write permissions.
+ pub fn readOnly(self: Self) bool {
+ return self.inner.readOnly();
+ }
+
+ /// Sets whether write permissions are provided.
+ /// On Unix, this affects *all* classes. If this is undesired, use `unixSet`
+ /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
+ pub fn setReadOnly(self: *Self, read_only: bool) void {
+ self.inner.setReadOnly(read_only);
+ }
+ };
+
+ pub const PermissionsWindows = struct {
+ attributes: os.windows.DWORD,
+
+ const Self = @This();
+
+ /// Returns `true` if permissions represent an unwritable file.
+ pub fn readOnly(self: Self) bool {
+ return self.attributes & os.windows.FILE_ATTRIBUTE_READONLY != 0;
+ }
+
+ /// Sets whether write permissions are provided.
+ /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
+ pub fn setReadOnly(self: *Self, read_only: bool) void {
+ if (read_only) {
+ self.attributes |= os.windows.FILE_ATTRIBUTE_READONLY;
+ } else {
+ self.attributes &= ~@as(os.windows.DWORD, os.windows.FILE_ATTRIBUTE_READONLY);
+ }
+ }
+ };
+
+ pub const PermissionsUnix = struct {
+ mode: Mode,
+
+ const Self = @This();
+
+ /// Returns `true` if permissions represent an unwritable file.
+ /// `true` is returned only if no class has write permissions.
+ pub fn readOnly(self: Self) bool {
+ return self.mode & 0o222 == 0;
+ }
+
+ /// Sets whether write permissions are provided.
+ /// This affects *all* classes. If this is undesired, use `unixSet`
+ /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
+ pub fn setReadOnly(self: *Self, read_only: bool) void {
+ if (read_only) {
+ self.mode &= ~@as(Mode, 0o222);
+ } else {
+ self.mode |= @as(Mode, 0o222);
+ }
+ }
+
+ pub const Class = enum(u2) {
+ user = 2,
+ group = 1,
+ other = 0,
+ };
+
+ pub const Permission = enum(u3) {
+ read = 0o4,
+ write = 0o2,
+ execute = 0o1,
+ };
+
+ /// Returns `true` if the chosen class has the selected permission.
+ /// This method is only available on Unix platforms.
+ pub fn unixHas(self: Self, class: Class, permission: Permission) bool {
+ const mask = @as(Mode, @enumToInt(permission)) << @as(u3, @enumToInt(class)) * 3;
+ return self.mode & mask != 0;
+ }
+
+ /// Sets the permissions for the chosen class. Any permissions set to `null` are left unchanged.
+ /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
+ pub fn unixSet(self: *Self, class: Class, permissions: struct {
+ read: ?bool = null,
+ write: ?bool = null,
+ execute: ?bool = null,
+ }) void {
+ const shift = @as(u3, @enumToInt(class)) * 3;
+ if (permissions.read) |r| {
+ if (r) {
+ self.mode |= @as(Mode, 0o4) << shift;
+ } else {
+ self.mode &= ~(@as(Mode, 0o4) << shift);
+ }
+ }
+ if (permissions.write) |w| {
+ if (w) {
+ self.mode |= @as(Mode, 0o2) << shift;
+ } else {
+ self.mode &= ~(@as(Mode, 0o2) << shift);
+ }
+ }
+ if (permissions.execute) |x| {
+ if (x) {
+ self.mode |= @as(Mode, 0o1) << shift;
+ } else {
+ self.mode &= ~(@as(Mode, 0o1) << shift);
+ }
+ }
+ }
+
+ /// Returns a `Permissions` struct representing the permissions from the passed mode.
+ pub fn unixNew(new_mode: Mode) Self {
+ return Self{
+ .mode = new_mode,
+ };
+ }
+ };
+
+ pub const SetPermissionsError = ChmodError;
+
+ /// Sets permissions according to the provided `Permissions` struct.
+ /// This method is *NOT* available on WASI
+ pub fn setPermissions(self: File, permissions: Permissions) SetPermissionsError!void {
+ switch (builtin.os.tag) {
+ .windows => {
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ var info = windows.FILE_BASIC_INFORMATION{
+ .CreationTime = 0,
+ .LastAccessTime = 0,
+ .LastWriteTime = 0,
+ .ChangeTime = 0,
+ .FileAttributes = permissions.inner.attributes,
+ };
+ const rc = windows.ntdll.NtSetInformationFile(
+ self.handle,
+ &io_status_block,
+ &info,
+ @sizeOf(windows.FILE_BASIC_INFORMATION),
+ .FileBasicInformation,
+ );
+ switch (rc) {
+ .SUCCESS => return,
+ .INVALID_HANDLE => unreachable,
+ .ACCESS_DENIED => return error.AccessDenied,
+ else => return windows.unexpectedStatus(rc),
+ }
+ },
+ .wasi => @compileError("Unsupported OS"), // Wasi filesystem does not *yet* support chmod
+ else => {
+ try self.chmod(permissions.inner.mode);
+ },
+ }
+ }
+
+ /// Cross-platform representation of file metadata.
+ /// Platform-specific functionality is available through the `inner` field.
+ pub const Metadata = struct {
+ /// You may use the `inner` field to use platform-specific functionality
+ inner: switch (builtin.os.tag) {
+ .windows => MetadataWindows,
+ .linux => MetadataLinux,
+ else => MetadataUnix,
+ },
+
+ const Self = @This();
+
+ /// Returns the size of the file
+ pub fn size(self: Self) u64 {
+ return self.inner.size();
+ }
+
+ /// Returns a `Permissions` struct, representing the permissions on the file
+ pub fn permissions(self: Self) Permissions {
+ return self.inner.permissions();
+ }
+
+ /// Returns the `Kind` of file.
+ /// On Windows, can only return: `.File`, `.Directory`, `.SymLink` or `.Unknown`
+ pub fn kind(self: Self) Kind {
+ return self.inner.kind();
+ }
+
+ /// Returns the last time the file was accessed in nanoseconds since UTC 1970-01-01
+ pub fn accessed(self: Self) i128 {
+ return self.inner.accessed();
+ }
+
+ /// Returns the time the file was modified in nanoseconds since UTC 1970-01-01
+ pub fn modified(self: Self) i128 {
+ return self.inner.modified();
+ }
+
+ /// Returns the time the file was created in nanoseconds since UTC 1970-01-01
+ /// On Windows, this cannot return null
+ /// On Linux, this returns null if the filesystem does not support creation times, or if the kernel is older than 4.11
+ /// On Unices, this returns null if the filesystem or OS does not support creation times
+ /// On MacOS, this returns the ctime if the filesystem does not support creation times; this is insanity, and yet another reason to hate on Apple
+ pub fn created(self: Self) ?i128 {
+ return self.inner.created();
+ }
+ };
+
+ pub const MetadataUnix = struct {
+ stat: os.Stat,
+
+ const Self = @This();
+
+ /// Returns the size of the file
+ pub fn size(self: Self) u64 {
+ return @intCast(u64, self.stat.size);
+ }
+
+ /// Returns a `Permissions` struct, representing the permissions on the file
+ pub fn permissions(self: Self) Permissions {
+ return Permissions{ .inner = PermissionsUnix{ .mode = self.stat.mode } };
+ }
+
+ /// Returns the `Kind` of the file
+ pub fn kind(self: Self) Kind {
+ if (builtin.os.tag == .wasi and !builtin.link_libc) return switch (self.stat.filetype) {
+ .BLOCK_DEVICE => Kind.BlockDevice,
+ .CHARACTER_DEVICE => Kind.CharacterDevice,
+ .DIRECTORY => Kind.Directory,
+ .SYMBOLIC_LINK => Kind.SymLink,
+ .REGULAR_FILE => Kind.File,
+ .SOCKET_STREAM, .SOCKET_DGRAM => Kind.UnixDomainSocket,
+ else => Kind.Unknown,
+ };
+
+ const m = self.stat.mode & os.S.IFMT;
+
+ switch (m) {
+ os.S.IFBLK => return Kind.BlockDevice,
+ os.S.IFCHR => return Kind.CharacterDevice,
+ os.S.IFDIR => return Kind.Directory,
+ os.S.IFIFO => return Kind.NamedPipe,
+ os.S.IFLNK => return Kind.SymLink,
+ os.S.IFREG => return Kind.File,
+ os.S.IFSOCK => return Kind.UnixDomainSocket,
+ else => {},
+ }
+
+ if (builtin.os.tag == .solaris) switch (m) {
+ os.S.IFDOOR => return Kind.Door,
+ os.S.IFPORT => return Kind.EventPort,
+ else => {},
+ };
+
+ return .Unknown;
+ }
+
+ /// Returns the last time the file was accessed in nanoseconds since UTC 1970-01-01
+ pub fn accessed(self: Self) i128 {
+ const atime = self.stat.atime();
+ return @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec;
+ }
+
+ /// Returns the last time the file was modified in nanoseconds since UTC 1970-01-01
+ pub fn modified(self: Self) i128 {
+ const mtime = self.stat.mtime();
+ return @as(i128, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec;
+ }
+
+ /// Returns the time the file was created in nanoseconds since UTC 1970-01-01
+ /// Returns null if this is not supported by the OS or filesystem
+ pub fn created(self: Self) ?i128 {
+ if (!@hasDecl(@TypeOf(self.stat), "birthtime")) return null;
+ const birthtime = self.stat.birthtime();
+
+ // If the filesystem doesn't support this the value *should* be:
+ // On FreeBSD: tv_nsec = 0, tv_sec = -1
+ // On NetBSD and OpenBSD: tv_nsec = 0, tv_sec = 0
+ // On MacOS, it is set to ctime -- we cannot detect this!!
+ switch (builtin.os.tag) {
+ .freebsd => if (birthtime.tv_sec == -1 and birthtime.tv_nsec == 0) return null,
+ .netbsd, .openbsd => if (birthtime.tv_sec == 0 and birthtime.tv_nsec == 0) return null,
+ .macos => {},
+ else => @compileError("Creation time detection not implemented for OS"),
+ }
+
+ return @as(i128, birthtime.tv_sec) * std.time.ns_per_s + birthtime.tv_nsec;
+ }
+ };
+
+ /// `MetadataUnix`, but using Linux's `statx` syscall.
+ /// On Linux versions below 4.11, `statx` will be filled with data from stat.
+ pub const MetadataLinux = struct {
+ statx: os.linux.Statx,
+
+ const Self = @This();
+
+ /// Returns the size of the file
+ pub fn size(self: Self) u64 {
+ return self.statx.size;
+ }
+
+ /// Returns a `Permissions` struct, representing the permissions on the file
+ pub fn permissions(self: Self) Permissions {
+ return Permissions{ .inner = PermissionsUnix{ .mode = self.statx.mode } };
+ }
+
+ /// Returns the `Kind` of the file
+ pub fn kind(self: Self) Kind {
+ const m = self.statx.mode & os.S.IFMT;
+
+ switch (m) {
+ os.S.IFBLK => return Kind.BlockDevice,
+ os.S.IFCHR => return Kind.CharacterDevice,
+ os.S.IFDIR => return Kind.Directory,
+ os.S.IFIFO => return Kind.NamedPipe,
+ os.S.IFLNK => return Kind.SymLink,
+ os.S.IFREG => return Kind.File,
+ os.S.IFSOCK => return Kind.UnixDomainSocket,
+ else => {},
+ }
+
+ return .Unknown;
+ }
+
+ /// Returns the last time the file was accessed in nanoseconds since UTC 1970-01-01
+ pub fn accessed(self: Self) i128 {
+ return @as(i128, self.statx.atime.tv_sec) * std.time.ns_per_s + self.statx.atime.tv_nsec;
+ }
+
+ /// Returns the last time the file was modified in nanoseconds since UTC 1970-01-01
+ pub fn modified(self: Self) i128 {
+ return @as(i128, self.statx.mtime.tv_sec) * std.time.ns_per_s + self.statx.mtime.tv_nsec;
+ }
+
+ /// Returns the time the file was created in nanoseconds since UTC 1970-01-01
+ /// Returns null if this is not supported by the filesystem, or on kernels older than version 4.11
+ pub fn created(self: Self) ?i128 {
+ if (self.statx.mask & os.linux.STATX_BTIME == 0) return null;
+ return @as(i128, self.statx.btime.tv_sec) * std.time.ns_per_s + self.statx.btime.tv_nsec;
+ }
+ };
+
+ pub const MetadataWindows = struct {
+ attributes: windows.DWORD,
+ reparse_tag: windows.DWORD,
+ _size: u64,
+ access_time: i128,
+ modified_time: i128,
+ creation_time: i128,
+
+ const Self = @This();
+
+ /// Returns the size of the file
+ pub fn size(self: Self) u64 {
+ return self._size;
+ }
+
+ /// Returns a `Permissions` struct, representing the permissions on the file
+ pub fn permissions(self: Self) Permissions {
+ return Permissions{ .inner = PermissionsWindows{ .attributes = self.attributes } };
+ }
+
+ /// Returns the `Kind` of the file.
+ /// Can only return: `.File`, `.Directory`, `.SymLink` or `.Unknown`
+ pub fn kind(self: Self) Kind {
+ if (self.attributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) {
+ if (self.reparse_tag & 0x20000000 != 0) {
+ return .SymLink;
+ }
+ } else if (self.attributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) {
+ return .Directory;
+ } else {
+ return .File;
+ }
+ return .Unknown;
+ }
+
+ /// Returns the last time the file was accessed in nanoseconds since UTC 1970-01-01
+ pub fn accessed(self: Self) i128 {
+ return self.access_time;
+ }
+
+ /// Returns the time the file was modified in nanoseconds since UTC 1970-01-01
+ pub fn modified(self: Self) i128 {
+ return self.modified_time;
+ }
+
+ /// Returns the time the file was created in nanoseconds since UTC 1970-01-01
+ /// This never returns null, only returning an optional for compatibility with other OSes
+ pub fn created(self: Self) ?i128 {
+ return self.creation_time;
+ }
+ };
+
+ pub const MetadataError = os.FStatError;
+
+ pub fn metadata(self: File) MetadataError!Metadata {
+ return Metadata{
+ .inner = switch (builtin.os.tag) {
+ .windows => blk: {
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ var info: windows.FILE_ALL_INFORMATION = undefined;
+
+ const rc = windows.ntdll.NtQueryInformationFile(self.handle, &io_status_block, &info, @sizeOf(windows.FILE_ALL_INFORMATION), .FileAllInformation);
+ switch (rc) {
+ .SUCCESS => {},
+ .BUFFER_OVERFLOW => {},
+ .INVALID_PARAMETER => unreachable,
+ .ACCESS_DENIED => return error.AccessDenied,
+ else => return windows.unexpectedStatus(rc),
+ }
+
+ const reparse_tag: windows.DWORD = reparse_blk: {
+ if (info.BasicInformation.FileAttributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) {
+ var reparse_buf: [windows.MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 = undefined;
+ try windows.DeviceIoControl(self.handle, windows.FSCTL_GET_REPARSE_POINT, null, reparse_buf[0..]);
+ const reparse_struct = @ptrCast(*const windows.REPARSE_DATA_BUFFER, @alignCast(@alignOf(windows.REPARSE_DATA_BUFFER), &reparse_buf[0]));
+ break :reparse_blk reparse_struct.ReparseTag;
+ }
+ break :reparse_blk 0;
+ };
+
+ break :blk MetadataWindows{
+ .attributes = info.BasicInformation.FileAttributes,
+ .reparse_tag = reparse_tag,
+ ._size = @bitCast(u64, info.StandardInformation.EndOfFile),
+ .access_time = windows.fromSysTime(info.BasicInformation.LastAccessTime),
+ .modified_time = windows.fromSysTime(info.BasicInformation.LastWriteTime),
+ .creation_time = windows.fromSysTime(info.BasicInformation.CreationTime),
+ };
+ },
+ .linux => blk: {
+ var stx = mem.zeroes(os.linux.Statx);
+ const rcx = os.linux.statx(self.handle, "\x00", os.linux.AT.EMPTY_PATH, os.linux.STATX_TYPE | os.linux.STATX_MODE | os.linux.STATX_ATIME | os.linux.STATX_MTIME | os.linux.STATX_BTIME, &stx);
+
+ switch (os.errno(rcx)) {
+ .SUCCESS => {},
+ // NOSYS happens when `statx` is unsupported, which is the case on kernel versions before 4.11
+ // Here, we call `fstat` and fill `stx` with the data we need
+ .NOSYS => {
+ const st = try os.fstat(self.handle);
+
+ stx.mode = @intCast(u16, st.mode);
+
+ // Hacky conversion from timespec to statx_timestamp
+ stx.atime = std.mem.zeroes(os.linux.statx_timestamp);
+ stx.atime.tv_sec = st.atim.tv_sec;
+ stx.atime.tv_nsec = @intCast(u32, st.atim.tv_nsec); // Guaranteed to succeed (tv_nsec is always below 10^9)
+
+ stx.mtime = std.mem.zeroes(os.linux.statx_timestamp);
+ stx.mtime.tv_sec = st.mtim.tv_sec;
+ stx.mtime.tv_nsec = @intCast(u32, st.mtim.tv_nsec);
+
+ stx.mask = os.linux.STATX_BASIC_STATS | os.linux.STATX_MTIME;
+ },
+ .BADF => unreachable,
+ .FAULT => unreachable,
+ .NOMEM => return error.SystemResources,
+ else => |err| return os.unexpectedErrno(err),
+ }
+
+ break :blk MetadataLinux{
+ .statx = stx,
+ };
+ },
+ else => blk: {
+ const st = try os.fstat(self.handle);
+ break :blk MetadataUnix{
+ .stat = st,
+ };
+ },
+ },
+ };
+ }
+
pub const UpdateTimesError = os.FutimensError || windows.SetFileTimeError;
/// The underlying file system may have a different granularity than nanoseconds,
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 309d9c3b07..b00b0609b2 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -1101,3 +1101,79 @@ test "chown" {
defer dir.close();
try dir.chown(null, null);
}
+
+test "File.Metadata" {
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const file = try tmp.dir.createFile("test_file", .{ .read = true });
+ defer file.close();
+
+ const metadata = try file.metadata();
+ try testing.expect(metadata.kind() == .File);
+ try testing.expect(metadata.size() == 0);
+ _ = metadata.accessed();
+ _ = metadata.modified();
+ _ = metadata.created();
+}
+
+test "File.Permissions" {
+ if (builtin.os.tag == .wasi)
+ return error.SkipZigTest;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const file = try tmp.dir.createFile("test_file", .{ .read = true });
+ defer file.close();
+
+ const metadata = try file.metadata();
+ var permissions = metadata.permissions();
+
+ try testing.expect(!permissions.readOnly());
+ permissions.setReadOnly(true);
+ try testing.expect(permissions.readOnly());
+
+ try file.setPermissions(permissions);
+ const new_permissions = (try file.metadata()).permissions();
+ try testing.expect(new_permissions.readOnly());
+
+ // Must be set to non-read-only to delete
+ permissions.setReadOnly(false);
+ try file.setPermissions(permissions);
+}
+
+test "File.PermissionsUnix" {
+ if (builtin.os.tag == .windows or builtin.os.tag == .wasi)
+ return error.SkipZigTest;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const file = try tmp.dir.createFile("test_file", .{ .mode = 0o666, .read = true });
+ defer file.close();
+
+ const metadata = try file.metadata();
+ var permissions = metadata.permissions();
+
+ permissions.setReadOnly(true);
+ try testing.expect(permissions.readOnly());
+ try testing.expect(!permissions.inner.unixHas(.user, .write));
+ permissions.inner.unixSet(.user, .{ .write = true });
+ try testing.expect(!permissions.readOnly());
+ try testing.expect(permissions.inner.unixHas(.user, .write));
+ try testing.expect(permissions.inner.mode & 0o400 != 0);
+
+ permissions.setReadOnly(true);
+ try file.setPermissions(permissions);
+ permissions = (try file.metadata()).permissions();
+ try testing.expect(permissions.readOnly());
+
+ // Must be set to non-read-only to delete
+ permissions.setReadOnly(false);
+ try file.setPermissions(permissions);
+
+ const permissions_unix = File.PermissionsUnix.unixNew(0o754);
+ try testing.expect(permissions_unix.unixHas(.user, .execute));
+ try testing.expect(!permissions_unix.unixHas(.other, .execute));
+}
From 7edf3d9f2d53f5b1c5e31ee1294ff9b52337760b Mon Sep 17 00:00:00 2001
From: Cody Tapscott
Date: Sun, 13 Feb 2022 14:16:40 -0700
Subject: [PATCH 0175/2031] Cast abi_size to usize
---
src/value.zig | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/value.zig b/src/value.zig
index a3dd6501e4..d07179359f 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1046,7 +1046,7 @@ pub const Value = extern union {
var bigint_buffer: BigIntSpace = undefined;
const bigint = val.toBigInt(&bigint_buffer);
const bits = ty.intInfo(target).bits;
- const abi_size = ty.abiSize(target);
+ const abi_size = @intCast(usize, ty.abiSize(target));
bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
},
.Enum => {
@@ -1055,7 +1055,7 @@ pub const Value = extern union {
var bigint_buffer: BigIntSpace = undefined;
const bigint = int_val.toBigInt(&bigint_buffer);
const bits = ty.intInfo(target).bits;
- const abi_size = ty.abiSize(target);
+ const abi_size = @intCast(usize, ty.abiSize(target));
bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
},
.Float => switch (ty.floatBits(target)) {
@@ -1098,7 +1098,7 @@ pub const Value = extern union {
const Limb = std.math.big.Limb;
const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
const limbs_buffer = try arena.alloc(Limb, limb_count);
- const abi_size = ty.abiSize(target);
+ const abi_size = @intCast(usize, ty.abiSize(target));
var bigint = BigIntMutable.init(limbs_buffer, 0);
bigint.readTwosComplement(buffer, int_info.bits, abi_size, endian, int_info.signedness);
return fromBigInt(arena, bigint.toConst());
From f5068107cdf54f143a150feb5d500962ba98e963 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sun, 13 Feb 2022 20:40:51 +0100
Subject: [PATCH 0176/2031] stage2 regalloc: track Inst instead of ?Inst in
register mapping
The information whether a register is allocated to an instruction is
already encoded in the free_registers "bitmap". Duplicating that
information in the registers map is unnecessary and may lead to
performance degradations.
---
src/register_manager.zig | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 81c9fa5734..3d76e94ba0 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -33,11 +33,12 @@ pub fn RegisterManager(
assert(callee_preserved_regs.len > 0); // see note above
return struct {
- /// Tracks the AIR instruction allocated to every register or
- /// `null` if no instruction is allocated to a register
+ /// Tracks the AIR instruction allocated to every register. If
+ /// no instruction is allocated to a register (i.e. the
+ /// register is free), the value in that slot is undefined.
///
/// The key must be canonical register.
- registers: [callee_preserved_regs.len]?Air.Inst.Index = [_]?Air.Inst.Index{null} ** callee_preserved_regs.len,
+ registers: [callee_preserved_regs.len]Air.Inst.Index = undefined,
/// Tracks which registers are free (in which case the
/// corresponding bit is set to 1)
free_registers: FreeRegInt = math.maxInt(FreeRegInt),
@@ -201,14 +202,14 @@ pub fn RegisterManager(
if (self.isRegFree(reg)) {
self.markRegUsed(reg);
} else {
- const spilled_inst = self.registers[index].?;
+ const spilled_inst = self.registers[index];
try self.getFunction().spillInstruction(reg, spilled_inst);
}
self.registers[index] = inst;
} else {
// Don't track the register
if (!self.isRegFree(reg)) {
- const spilled_inst = self.registers[index].?;
+ const spilled_inst = self.registers[index];
try self.getFunction().spillInstruction(reg, spilled_inst);
self.freeReg(reg);
}
@@ -241,7 +242,7 @@ pub fn RegisterManager(
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
- const spilled_inst = self.registers[index].?;
+ const spilled_inst = self.registers[index];
self.registers[index] = tracked_inst;
try self.getFunction().spillInstruction(reg, spilled_inst);
} else {
@@ -251,7 +252,7 @@ pub fn RegisterManager(
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
- const spilled_inst = self.registers[index].?;
+ const spilled_inst = self.registers[index];
try self.getFunction().spillInstruction(reg, spilled_inst);
self.freeReg(reg);
}
@@ -265,7 +266,7 @@ pub fn RegisterManager(
const index = reg.allocIndex() orelse return;
self.markRegAllocated(reg);
- assert(self.registers[index] == null);
+ assert(self.isRegFree(reg));
self.registers[index] = inst;
self.markRegUsed(reg);
}
@@ -275,7 +276,7 @@ pub fn RegisterManager(
const index = reg.allocIndex() orelse return;
log.debug("freeing register {}", .{reg});
- self.registers[index] = null;
+ self.registers[index] = undefined;
self.markRegFree(reg);
}
};
From 9ca3c897ec546bc08eefe53471c995deefc9f36d Mon Sep 17 00:00:00 2001
From: Sebastian Keller
Date: Sun, 13 Feb 2022 21:58:41 +0100
Subject: [PATCH 0177/2031] test_runner.zig: Do not log test name twice
In #10859 I moved the `test_node.end()` call after everything else has
been logged. Now the `test_fn.name` is printed by `Progress` itself,
making the additional log obsolete.
---
lib/std/special/test_runner.zig | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 201a5ccd90..3a5849b1a2 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -70,7 +70,7 @@ pub fn main() void {
.blocking => {
skip_count += 1;
test_node.end();
- progress.log("{s}... SKIP (async test)\n", .{test_fn.name});
+ progress.log("SKIP (async test)\n", .{});
if (!have_tty) std.debug.print("SKIP (async test)\n", .{});
continue;
},
@@ -82,13 +82,13 @@ pub fn main() void {
} else |err| switch (err) {
error.SkipZigTest => {
skip_count += 1;
- progress.log("{s}... SKIP\n", .{test_fn.name});
+ progress.log("SKIP\n", .{});
if (!have_tty) std.debug.print("SKIP\n", .{});
test_node.end();
},
else => {
fail_count += 1;
- progress.log("{s}... FAIL ({s})\n", .{ test_fn.name, @errorName(err) });
+ progress.log("FAIL ({s})\n", .{@errorName(err)});
if (!have_tty) std.debug.print("FAIL ({s})\n", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
From 8ed792b640ef3601dfb773d670915d74fbbbad13 Mon Sep 17 00:00:00 2001
From: erikarvstedt <36110478+erikarvstedt@users.noreply.github.com>
Date: Mon, 14 Feb 2022 11:14:50 +0100
Subject: [PATCH 0178/2031] std.Progress: fix suffix printing
Previously, `suffix` was copied to `output_buffer` at position
`max_end`, thereby writing into reserved space after `max_end`.
This only worked because `suffix` was not larger than
`bytes_needed_for_esc_codes_at_end` (otherwise there'd be a potential
buffer overrun) and no escape codes at end are actually written.
Since 2d5b2bf1c986d037ef965bf8c9b4d8dfd5967478, escape codes are no
longer written to the end of the buffer. They are now written
exclusively to the front of the buffer.
This allows removing `bytes_needed_for_esc_codes_at_end` and
simplifying the suffix printing logic.
This also fixes the bug that the ellipsis suffix was not printed in
Windows terminals because `end.* > max_end` was never true.
---
lib/std/Progress.zig | 24 ++++++++++--------------
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index ecef04c600..07f9077844 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -312,16 +312,10 @@ fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: any
error.NoSpaceLeft => {
self.columns_written += self.output_buffer.len - end.*;
end.* = self.output_buffer.len;
+ const suffix = "... ";
+ std.mem.copy(u8, self.output_buffer[self.output_buffer.len - suffix.len ..], suffix);
},
}
- const bytes_needed_for_esc_codes_at_end: u8 = if (self.is_windows_terminal) 0 else 11;
- const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end;
- if (end.* > max_end) {
- const suffix = "... ";
- self.columns_written = self.columns_written - (end.* - max_end) + suffix.len;
- std.mem.copy(u8, self.output_buffer[max_end..], suffix);
- end.* = max_end + suffix.len;
- }
}
test "basic functionality" {
@@ -335,6 +329,8 @@ test "basic functionality" {
const root_node = progress.start("", 100);
defer root_node.end();
+ const speed_factor = std.time.ns_per_ms;
+
const sub_task_names = [_][]const u8{
"reticulating splines",
"adjusting shoes",
@@ -350,24 +346,24 @@ test "basic functionality" {
next_sub_task = (next_sub_task + 1) % sub_task_names.len;
node.completeOne();
- std.time.sleep(5 * std.time.ns_per_ms);
+ std.time.sleep(5 * speed_factor);
node.completeOne();
node.completeOne();
- std.time.sleep(5 * std.time.ns_per_ms);
+ std.time.sleep(5 * speed_factor);
node.completeOne();
node.completeOne();
- std.time.sleep(5 * std.time.ns_per_ms);
+ std.time.sleep(5 * speed_factor);
node.end();
- std.time.sleep(5 * std.time.ns_per_ms);
+ std.time.sleep(5 * speed_factor);
}
{
var node = root_node.start("this is a really long name designed to activate the truncation code. let's find out if it works", 0);
node.activate();
- std.time.sleep(10 * std.time.ns_per_ms);
+ std.time.sleep(10 * speed_factor);
progress.refresh();
- std.time.sleep(10 * std.time.ns_per_ms);
+ std.time.sleep(10 * speed_factor);
node.end();
}
}
From ee69a4b45f67e929fe5780ab6bf44360f6511d26 Mon Sep 17 00:00:00 2001
From: John Schmidt
Date: Sat, 12 Feb 2022 20:03:16 +0100
Subject: [PATCH 0179/2031] stage2: improve compiler error message for bad
union init
---
src/Sema.zig | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 7dbc36af37..480a6a1ca2 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2631,9 +2631,24 @@ fn validateUnionInit(
union_ptr: Air.Inst.Ref,
) CompileError!void {
if (instrs.len != 1) {
- // TODO add note for other field
- // TODO add note for union declared here
- return sema.fail(block, init_src, "only one union field can be active at once", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ init_src,
+ "cannot initialize multiple union fields at once, unions can only have one active field",
+ .{},
+ );
+ errdefer msg.destroy(sema.gpa);
+
+ for (instrs[1..]) |inst| {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const inst_src: LazySrcLoc = .{ .node_offset_back2tok = inst_data.src_node };
+ try sema.errNote(block, inst_src, msg, "additional initializer here", .{});
+ }
+ try sema.mod.errNoteNonLazy(union_obj.srcLoc(), msg, "union declared here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
const field_ptr = instrs[0];
From b85c0d6a472255362f692ddbc8afd14c2da4a996 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Mon, 14 Feb 2022 15:23:45 +0200
Subject: [PATCH 0180/2031] std: fix tests that were not run due to refAllDecls
regression
---
lib/std/zig/parser_test.zig | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index a0cc11ce4b..5eca272b62 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -233,7 +233,7 @@ test "zig fmt: eof after missing comma" {
try testError(
\\foo()
, &[_]Error{
- .expected_token,
+ .expected_comma_after_field,
});
}
@@ -5074,8 +5074,8 @@ test "recovery: missing comma" {
\\ }
\\}
, &[_]Error{
- .expected_token,
- .expected_token,
+ .expected_comma_after_switch_prong,
+ .expected_comma_after_switch_prong,
.invalid_token,
});
}
@@ -5156,10 +5156,10 @@ test "recovery: missing semicolon" {
\\ @foo
\\}
, &[_]Error{
- .expected_token,
- .expected_token,
+ .expected_semi_after_stmt,
+ .expected_semi_after_stmt,
.expected_param_list,
- .expected_token,
+ .expected_semi_after_stmt,
});
}
@@ -5174,9 +5174,9 @@ test "recovery: invalid container members" {
\\}
, &[_]Error{
.expected_expr,
- .expected_token,
+ .expected_comma_after_field,
.expected_container_members,
- .expected_token,
+ .expected_semi_after_stmt,
});
}
@@ -5201,7 +5201,7 @@ test "recovery: mismatched bracket at top level" {
\\ arr: 128]?G
\\};
, &[_]Error{
- .expected_token,
+ .expected_comma_after_field,
});
}
@@ -5301,9 +5301,9 @@ test "recovery: missing comma in params" {
\\fn bar(a: i32, b: i32 c) void { }
\\
, &[_]Error{
- .expected_token,
- .expected_token,
- .expected_token,
+ .expected_comma_after_param,
+ .expected_comma_after_param,
+ .expected_comma_after_param,
});
}
From 04f3d9301798e41ac8272822328914073e01a6ab Mon Sep 17 00:00:00 2001
From: Al Hoang <3811822-hoanga@users.noreply.gitlab.com>
Date: Sat, 18 Dec 2021 23:30:51 -0600
Subject: [PATCH 0181/2031] haiku add missing cimport include for compilation
---
src/Compilation.zig | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index bd7581863b..0e1714ba38 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -4467,7 +4467,7 @@ fn detectLibCIncludeDirs(
}
fn detectLibCFromLibCInstallation(arena: Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
- var list = try std.ArrayList([]const u8).initCapacity(arena, 4);
+ var list = try std.ArrayList([]const u8).initCapacity(arena, 5);
list.appendAssumeCapacity(lci.include_dir.?);
@@ -4487,6 +4487,9 @@ fn detectLibCFromLibCInstallation(arena: Allocator, target: Target, lci: *const
const include_dir_path = lci.include_dir orelse return error.LibCInstallationNotAvailable;
const os_dir = try std.fs.path.join(arena, &[_][]const u8{ include_dir_path, "os" });
list.appendAssumeCapacity(os_dir);
+ // Errors.h
+ const os_support_dir = try std.fs.path.join(arena, &[_][]const u8{ include_dir_path, "os/support" });
+ list.appendAssumeCapacity(os_support_dir);
const config_dir = try std.fs.path.join(arena, &[_][]const u8{ include_dir_path, "config" });
list.appendAssumeCapacity(config_dir);
From 27cfbf949a25375a6ae7671a00ed3f7bae67d3f9 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sat, 12 Feb 2022 18:41:16 +0100
Subject: [PATCH 0182/2031] macho: re-enable creating dSYM bundle
* update number of type abbrevs to match Elf linker
* update `DebugSymbols` to write symbol and string tables
at the end to match the `MachO` linker
* TODO: update segment vm addresses when growing segments in
the binary
* TODO: store DWARF relocations in linker's interned arena
---
src/link/MachO.zig | 106 ++++---
src/link/MachO/DebugSymbols.zig | 499 ++++++++++++++++++++------------
2 files changed, 370 insertions(+), 235 deletions(-)
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index b8e2ae0840..5b4ce9b99c 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -354,32 +354,31 @@ pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
return self;
}
- // TODO Migrate DebugSymbols to the merged linker codepaths
- // if (!options.strip and options.module != null) {
- // // Create dSYM bundle.
- // const dir = options.module.?.zig_cache_artifact_directory;
- // log.debug("creating {s}.dSYM bundle in {s}", .{ sub_path, dir.path });
+ if (!options.strip and options.module != null) {
+ // Create dSYM bundle.
+ const dir = options.module.?.zig_cache_artifact_directory;
+ log.debug("creating {s}.dSYM bundle in {s}", .{ emit.sub_path, dir.path });
- // const d_sym_path = try fmt.allocPrint(
- // allocator,
- // "{s}.dSYM" ++ fs.path.sep_str ++ "Contents" ++ fs.path.sep_str ++ "Resources" ++ fs.path.sep_str ++ "DWARF",
- // .{sub_path},
- // );
- // defer allocator.free(d_sym_path);
+ const d_sym_path = try fmt.allocPrint(
+ allocator,
+ "{s}.dSYM" ++ fs.path.sep_str ++ "Contents" ++ fs.path.sep_str ++ "Resources" ++ fs.path.sep_str ++ "DWARF",
+ .{emit.sub_path},
+ );
+ defer allocator.free(d_sym_path);
- // var d_sym_bundle = try dir.handle.makeOpenPath(d_sym_path, .{});
- // defer d_sym_bundle.close();
+ var d_sym_bundle = try dir.handle.makeOpenPath(d_sym_path, .{});
+ defer d_sym_bundle.close();
- // const d_sym_file = try d_sym_bundle.createFile(sub_path, .{
- // .truncate = false,
- // .read = true,
- // });
+ const d_sym_file = try d_sym_bundle.createFile(emit.sub_path, .{
+ .truncate = false,
+ .read = true,
+ });
- // self.d_sym = .{
- // .base = self,
- // .file = d_sym_file,
- // };
- // }
+ self.d_sym = .{
+ .base = self,
+ .file = d_sym_file,
+ };
+ }
// Index 0 is always a null symbol.
try self.locals.append(allocator, .{
@@ -393,8 +392,8 @@ pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
try self.populateMissingMetadata();
- if (self.d_sym) |*ds| {
- try ds.populateMissingMetadata(allocator);
+ if (self.d_sym) |*d_sym| {
+ try d_sym.populateMissingMetadata(allocator);
}
return self;
@@ -1048,9 +1047,9 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
try self.updateSectionOrdinals();
try self.writeLinkeditSegment();
- if (self.d_sym) |*ds| {
+ if (self.d_sym) |*d_sym| {
// Flush debug symbols bundle.
- try ds.flushModule(self.base.allocator, self.base.options);
+ try d_sym.flushModule(self.base.allocator, self.base.options);
}
if (self.requires_adhoc_codesig) {
@@ -3374,8 +3373,8 @@ pub fn deinit(self: *MachO) void {
if (self.llvm_object) |llvm_object| llvm_object.destroy(self.base.allocator);
}
- if (self.d_sym) |*ds| {
- ds.deinit(self.base.allocator);
+ if (self.d_sym) |*d_sym| {
+ d_sym.deinit(self.base.allocator);
}
self.section_ordinals.deinit(self.base.allocator);
@@ -3497,13 +3496,13 @@ fn freeAtom(self: *MachO, atom: *Atom, match: MatchingSection, owns_atom: bool)
}
}
- if (self.d_sym) |*ds| {
- if (ds.dbg_info_decl_first == atom) {
- ds.dbg_info_decl_first = atom.dbg_info_next;
+ if (self.d_sym) |*d_sym| {
+ if (d_sym.dbg_info_decl_first == atom) {
+ d_sym.dbg_info_decl_first = atom.dbg_info_next;
}
- if (ds.dbg_info_decl_last == atom) {
+ if (d_sym.dbg_info_decl_last == atom) {
// TODO shrink the .debug_info section size here
- ds.dbg_info_decl_last = atom.dbg_info_prev;
+ d_sym.dbg_info_decl_last = atom.dbg_info_prev;
}
}
@@ -3675,6 +3674,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const decl = func.owner_decl;
self.freeUnnamedConsts(decl);
+
// TODO clearing the code and relocs buffer should probably be orchestrated
// in a different, smarter, more automatic way somewhere else, in a more centralised
// way than this.
@@ -3686,8 +3686,8 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
defer code_buffer.deinit();
var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
- const debug_buffers = if (self.d_sym) |*ds| blk: {
- debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
+ const debug_buffers = if (self.d_sym) |*d_sym| blk: {
+ debug_buffers_buf = try d_sym.initDeclDebugBuffers(self.base.allocator, module, decl);
break :blk &debug_buffers_buf;
} else null;
defer {
@@ -3725,13 +3725,9 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
_ = try self.placeDecl(decl, decl.link.macho.code.items.len);
if (debug_buffers) |db| {
- try self.d_sym.?.commitDeclDebugInfo(
- self.base.allocator,
- module,
- decl,
- db,
- self.base.options.target,
- );
+ if (self.d_sym) |*d_sym| {
+ try d_sym.commitDeclDebugInfo(self.base.allocator, module, decl, db);
+ }
}
// Since we updated the vaddr and the size, each corresponding export symbol also
@@ -3827,8 +3823,8 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
defer code_buffer.deinit();
var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
- const debug_buffers = if (self.d_sym) |*ds| blk: {
- debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
+ const debug_buffers = if (self.d_sym) |*d_sym| blk: {
+ debug_buffers_buf = try d_sym.initDeclDebugBuffers(self.base.allocator, module, decl);
break :blk &debug_buffers_buf;
} else null;
defer {
@@ -4125,8 +4121,8 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
- if (self.d_sym) |*ds| {
- try ds.updateDeclLineNumber(module, decl);
+ if (self.d_sym) |*d_sym| {
+ try d_sym.updateDeclLineNumber(module, decl);
}
}
@@ -4322,27 +4318,27 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
_ = self.atom_by_index_table.remove(decl.link.macho.local_sym_index);
decl.link.macho.local_sym_index = 0;
}
- if (self.d_sym) |*ds| {
+ if (self.d_sym) |*d_sym| {
// TODO make this logic match freeAtom. Maybe abstract the logic
// out since the same thing is desired for both.
- _ = ds.dbg_line_fn_free_list.remove(&decl.fn_link.macho);
+ _ = d_sym.dbg_line_fn_free_list.remove(&decl.fn_link.macho);
if (decl.fn_link.macho.prev) |prev| {
- ds.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
+ d_sym.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
prev.next = decl.fn_link.macho.next;
if (decl.fn_link.macho.next) |next| {
next.prev = prev;
} else {
- ds.dbg_line_fn_last = prev;
+ d_sym.dbg_line_fn_last = prev;
}
} else if (decl.fn_link.macho.next) |next| {
- ds.dbg_line_fn_first = next;
+ d_sym.dbg_line_fn_first = next;
next.prev = null;
}
- if (ds.dbg_line_fn_first == &decl.fn_link.macho) {
- ds.dbg_line_fn_first = decl.fn_link.macho.next;
+ if (d_sym.dbg_line_fn_first == &decl.fn_link.macho) {
+ d_sym.dbg_line_fn_first = decl.fn_link.macho.next;
}
- if (ds.dbg_line_fn_last == &decl.fn_link.macho) {
- ds.dbg_line_fn_last = decl.fn_link.macho.prev;
+ if (d_sym.dbg_line_fn_last == &decl.fn_link.macho) {
+ d_sym.dbg_line_fn_last = decl.fn_link.macho.prev;
}
}
}
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index beef0b6b2c..36d93b8255 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -3,7 +3,8 @@ const DebugSymbols = @This();
const std = @import("std");
const assert = std.debug.assert;
const fs = std.fs;
-const log = std.log.scoped(.dsym);
+const log = std.log.scoped(.link);
+const leb128 = std.leb;
const macho = std.macho;
const math = std.math;
const mem = std.mem;
@@ -22,8 +23,6 @@ const SrcFn = MachO.SrcFn;
const makeStaticString = MachO.makeStaticString;
const padToIdeal = MachO.padToIdeal;
-const page_size: u16 = 0x1000;
-
base: *MachO,
file: fs.File,
@@ -49,9 +48,6 @@ uuid_cmd_index: ?u16 = null,
/// Index into __TEXT,__text section.
text_section_index: ?u16 = null,
-linkedit_off: u16 = page_size,
-linkedit_size: u16 = page_size,
-
debug_info_section_index: ?u16 = null,
debug_abbrev_section_index: ?u16 = null,
debug_str_section_index: ?u16 = null,
@@ -76,7 +72,6 @@ dbg_info_decl_last: ?*TextBlock = null,
debug_string_table: std.ArrayListUnmanaged(u8) = .{},
load_commands_dirty: bool = false,
-strtab_dirty: bool = false,
debug_string_table_dirty: bool = false,
debug_abbrev_section_dirty: bool = false,
debug_aranges_section_dirty: bool = false,
@@ -87,8 +82,12 @@ const abbrev_compile_unit = 1;
const abbrev_subprogram = 2;
const abbrev_subprogram_retvoid = 3;
const abbrev_base_type = 4;
-const abbrev_pad1 = 5;
-const abbrev_parameter = 6;
+const abbrev_ptr_type = 5;
+const abbrev_struct_type = 6;
+const abbrev_anon_struct_type = 7;
+const abbrev_struct_member = 8;
+const abbrev_pad1 = 9;
+const abbrev_parameter = 10;
/// The reloc offset for the virtual address of a function in its Line Number Program.
/// Size is a virtual address integer.
@@ -108,30 +107,21 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void
try self.load_commands.append(allocator, base_cmd);
self.load_commands_dirty = true;
}
+
if (self.symtab_cmd_index == null) {
self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len);
- const base_cmd = self.base.load_commands.items[self.base.symtab_cmd_index.?].symtab;
- const symtab_size = base_cmd.nsyms * @sizeOf(macho.nlist_64);
- const symtab_off = self.findFreeSpaceLinkedit(symtab_size, @sizeOf(macho.nlist_64));
-
- log.debug("found symbol table free space 0x{x} to 0x{x}", .{ symtab_off, symtab_off + symtab_size });
-
- const strtab_off = self.findFreeSpaceLinkedit(base_cmd.strsize, 1);
-
- log.debug("found string table free space 0x{x} to 0x{x}", .{ strtab_off, strtab_off + base_cmd.strsize });
-
- try self.load_commands.append(allocator, .{
+ try self.load_commands.append(self.base.base.allocator, .{
.symtab = .{
.cmdsize = @sizeOf(macho.symtab_command),
- .symoff = @intCast(u32, symtab_off),
- .nsyms = base_cmd.nsyms,
- .stroff = @intCast(u32, strtab_off),
- .strsize = base_cmd.strsize,
+ .symoff = 0,
+ .nsyms = 0,
+ .stroff = 0,
+ .strsize = 0,
},
});
self.load_commands_dirty = true;
- self.strtab_dirty = true;
}
+
if (self.pagezero_segment_cmd_index == null) {
self.pagezero_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.pagezero_segment_cmd_index.?].segment;
@@ -139,6 +129,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
+
if (self.text_segment_cmd_index == null) {
self.text_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.text_segment_cmd_index.?].segment;
@@ -146,6 +137,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
+
if (self.data_const_segment_cmd_index == null) outer: {
if (self.base.data_const_segment_cmd_index == null) break :outer; // __DATA_CONST is optional
self.data_const_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
@@ -154,6 +146,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
+
if (self.data_segment_cmd_index == null) outer: {
if (self.base.data_segment_cmd_index == null) break :outer; // __DATA is optional
self.data_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
@@ -162,26 +155,29 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
+
if (self.linkedit_segment_cmd_index == null) {
self.linkedit_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const base_cmd = self.base.load_commands.items[self.base.linkedit_segment_cmd_index.?].segment;
var cmd = try self.copySegmentCommand(allocator, base_cmd);
- cmd.inner.vmsize = self.linkedit_size;
- cmd.inner.fileoff = self.linkedit_off;
- cmd.inner.filesize = self.linkedit_size;
+ // TODO this needs reworking
+ cmd.inner.vmsize = self.base.page_size;
+ cmd.inner.fileoff = self.base.page_size;
+ cmd.inner.filesize = self.base.page_size;
try self.load_commands.append(allocator, .{ .segment = cmd });
self.load_commands_dirty = true;
}
+
if (self.dwarf_segment_cmd_index == null) {
self.dwarf_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
const linkedit = self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
const ideal_size: u16 = 200 + 128 + 160 + 250;
- const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), page_size);
- const off = linkedit.inner.fileoff + linkedit.inner.filesize;
+ const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.base.page_size);
+ const fileoff = linkedit.inner.fileoff + linkedit.inner.filesize;
const vmaddr = linkedit.inner.vmaddr + linkedit.inner.vmsize;
- log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size });
+ log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ fileoff, fileoff + needed_size });
try self.load_commands.append(allocator, .{
.segment = .{
@@ -189,13 +185,14 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void
.segname = makeStaticString("__DWARF"),
.vmaddr = vmaddr,
.vmsize = needed_size,
- .fileoff = off,
+ .fileoff = fileoff,
.filesize = needed_size,
},
},
});
self.load_commands_dirty = true;
}
+
if (self.debug_str_section_index == null) {
assert(self.debug_string_table.items.len == 0);
self.debug_str_section_index = try self.allocateSection(
@@ -205,18 +202,22 @@ pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void
);
self.debug_string_table_dirty = true;
}
+
if (self.debug_info_section_index == null) {
self.debug_info_section_index = try self.allocateSection("__debug_info", 200, 0);
self.debug_info_header_dirty = true;
}
+
if (self.debug_abbrev_section_index == null) {
self.debug_abbrev_section_index = try self.allocateSection("__debug_abbrev", 128, 0);
self.debug_abbrev_section_dirty = true;
}
+
if (self.debug_aranges_section_index == null) {
self.debug_aranges_section_index = try self.allocateSection("__debug_aranges", 160, 4);
self.debug_aranges_section_dirty = true;
}
+
if (self.debug_line_section_index == null) {
self.debug_line_section_index = try self.allocateSection("__debug_line", 250, 0);
self.debug_line_header_dirty = true;
@@ -300,41 +301,91 @@ pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Opti
// we can simply append these bytes.
const abbrev_buf = [_]u8{
abbrev_compile_unit, DW.TAG.compile_unit, DW.CHILDREN.yes, // header
- DW.AT.stmt_list, DW.FORM.sec_offset, // offset
- DW.AT.low_pc, DW.FORM.addr,
- DW.AT.high_pc, DW.FORM.addr,
- DW.AT.name, DW.FORM.strp,
- DW.AT.comp_dir, DW.FORM.strp,
- DW.AT.producer, DW.FORM.strp,
- DW.AT.language, DW.FORM.data2,
- 0, 0, // table sentinel
- abbrev_subprogram, DW.TAG.subprogram, DW.CHILDREN.yes, // header
- DW.AT.low_pc, DW.FORM.addr, // start VM address
- DW.AT.high_pc, DW.FORM.data4,
- DW.AT.type, DW.FORM.ref4,
- DW.AT.name, DW.FORM.string,
- DW.AT.decl_line, DW.FORM.data4,
- DW.AT.decl_file, DW.FORM.data1,
+ DW.AT.stmt_list, DW.FORM.sec_offset, DW.AT.low_pc,
+ DW.FORM.addr, DW.AT.high_pc, DW.FORM.addr,
+ DW.AT.name, DW.FORM.strp, DW.AT.comp_dir,
+ DW.FORM.strp, DW.AT.producer, DW.FORM.strp,
+ DW.AT.language, DW.FORM.data2, 0,
+ 0, // table sentinel
+ abbrev_subprogram,
+ DW.TAG.subprogram,
+ DW.CHILDREN.yes, // header
+ DW.AT.low_pc,
+ DW.FORM.addr,
+ DW.AT.high_pc,
+ DW.FORM.data4,
+ DW.AT.type,
+ DW.FORM.ref4,
+ DW.AT.name,
+ DW.FORM.string,
0, 0, // table sentinel
abbrev_subprogram_retvoid,
DW.TAG.subprogram, DW.CHILDREN.yes, // header
DW.AT.low_pc, DW.FORM.addr,
DW.AT.high_pc, DW.FORM.data4,
DW.AT.name, DW.FORM.string,
- DW.AT.decl_line, DW.FORM.data4,
- DW.AT.decl_file, DW.FORM.data1,
- 0, 0, // table sentinel
- abbrev_base_type, DW.TAG.base_type, DW.CHILDREN.no, // header
- DW.AT.encoding, DW.FORM.data1, DW.AT.byte_size,
- DW.FORM.data1, DW.AT.name, DW.FORM.string,
- 0, 0, // table sentinel
- abbrev_pad1, DW.TAG.unspecified_type, DW.CHILDREN.no, // header
- 0, 0, // table sentinel
- abbrev_parameter, DW.TAG.formal_parameter, DW.CHILDREN.no, // header
- DW.AT.location, DW.FORM.exprloc, DW.AT.type,
- DW.FORM.ref4, DW.AT.name, DW.FORM.string,
- 0, 0, // table sentinel
- 0, 0, 0, // section sentinel
+ 0,
+ 0, // table sentinel
+ abbrev_base_type,
+ DW.TAG.base_type,
+ DW.CHILDREN.no, // header
+ DW.AT.encoding,
+ DW.FORM.data1,
+ DW.AT.byte_size,
+ DW.FORM.data1,
+ DW.AT.name,
+ DW.FORM.string,
+ 0,
+ 0, // table sentinel
+ abbrev_ptr_type,
+ DW.TAG.pointer_type,
+ DW.CHILDREN.no, // header
+ DW.AT.type,
+ DW.FORM.ref4,
+ 0,
+ 0, // table sentinel
+ abbrev_struct_type,
+ DW.TAG.structure_type,
+ DW.CHILDREN.yes, // header
+ DW.AT.byte_size,
+ DW.FORM.sdata,
+ DW.AT.name,
+ DW.FORM.string,
+ 0,
+ 0, // table sentinel
+ abbrev_anon_struct_type,
+ DW.TAG.structure_type,
+ DW.CHILDREN.yes, // header
+ DW.AT.byte_size,
+ DW.FORM.sdata,
+ 0,
+ 0, // table sentinel
+ abbrev_struct_member,
+ DW.TAG.member,
+ DW.CHILDREN.no, // header
+ DW.AT.name,
+ DW.FORM.string,
+ DW.AT.type,
+ DW.FORM.ref4,
+ DW.AT.data_member_location,
+ DW.FORM.sdata,
+ 0,
+ 0, // table sentinel
+ abbrev_pad1,
+ DW.TAG.unspecified_type,
+ DW.CHILDREN.no, // header
+ 0,
+ 0, // table sentinel
+ abbrev_parameter,
+ DW.TAG.formal_parameter, DW.CHILDREN.no, // header
+ DW.AT.location, DW.FORM.exprloc,
+ DW.AT.type, DW.FORM.ref4,
+ DW.AT.name, DW.FORM.string,
+ 0,
+ 0, // table sentinel
+ 0,
+ 0,
+ 0, // section sentinel
};
const needed_size = abbrev_buf.len;
@@ -583,13 +634,12 @@ pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Opti
}
}
- try self.writeStringTable();
+ try self.writeLinkeditSegment();
self.updateDwarfSegment();
try self.writeLoadCommands(allocator);
try self.writeHeader();
assert(!self.load_commands_dirty);
- assert(!self.strtab_dirty);
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
assert(!self.debug_string_table_dirty);
@@ -663,7 +713,7 @@ fn updateDwarfSegment(self: *DebugSymbols) void {
if (file_size != dwarf_segment.inner.filesize) {
dwarf_segment.inner.filesize = file_size;
if (dwarf_segment.inner.vmsize < dwarf_segment.inner.filesize) {
- dwarf_segment.inner.vmsize = mem.alignForwardGeneric(u64, dwarf_segment.inner.filesize, page_size);
+ dwarf_segment.inner.vmsize = mem.alignForwardGeneric(u64, dwarf_segment.inner.filesize, self.base.page_size);
}
self.load_commands_dirty = true;
}
@@ -719,23 +769,10 @@ fn writeHeader(self: *DebugSymbols) !void {
try self.file.pwriteAll(mem.asBytes(&header), 0);
}
-fn allocatedSizeLinkedit(self: *DebugSymbols, start: u64) u64 {
- assert(start > 0);
- var min_pos: u64 = std.math.maxInt(u64);
-
- if (self.symtab_cmd_index) |idx| {
- const symtab = self.load_commands.items[idx].symtab;
- if (symtab.symoff >= start and symtab.symoff < min_pos) min_pos = symtab.symoff;
- if (symtab.stroff >= start and symtab.stroff < min_pos) min_pos = symtab.stroff;
- }
-
- return min_pos - start;
-}
-
fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
const seg = self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
assert(start >= seg.inner.fileoff);
- var min_pos: u64 = seg.inner.fileoff + seg.inner.filesize;
+ var min_pos: u64 = std.math.maxInt(u64);
for (seg.sections.items) |section| {
if (section.offset <= start) continue;
if (section.offset < min_pos) min_pos = section.offset;
@@ -743,102 +780,72 @@ fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
return min_pos - start;
}
-fn detectAllocCollisionLinkedit(self: *DebugSymbols, start: u64, size: u64) ?u64 {
- const end = start + padToIdeal(size);
-
- if (self.symtab_cmd_index) |idx| outer: {
- if (self.load_commands.items.len == idx) break :outer;
- const symtab = self.load_commands.items[idx].symtab;
- {
- // Symbol table
- const symsize = symtab.nsyms * @sizeOf(macho.nlist_64);
- const increased_size = padToIdeal(symsize);
- const test_end = symtab.symoff + increased_size;
- if (end > symtab.symoff and start < test_end) {
- return test_end;
- }
- }
- {
- // String table
- const increased_size = padToIdeal(symtab.strsize);
- const test_end = symtab.stroff + increased_size;
- if (end > symtab.stroff and start < test_end) {
- return test_end;
- }
- }
- }
-
- return null;
-}
-
-fn findFreeSpaceLinkedit(self: *DebugSymbols, object_size: u64, min_alignment: u16) u64 {
- var start: u64 = self.linkedit_off;
- while (self.detectAllocCollisionLinkedit(start, object_size)) |item_end| {
- start = mem.alignForwardGeneric(u64, item_end, min_alignment);
- }
- return start;
-}
-
-fn relocateSymbolTable(self: *DebugSymbols) !void {
- const symtab = &self.load_commands.items[self.symtab_cmd_index.?].symtab;
- const nlocals = self.base.locals.items.len;
- const nglobals = self.base.globals.items.len;
- const nsyms = nlocals + nglobals;
-
- if (symtab.nsyms < nsyms) {
- const needed_size = nsyms * @sizeOf(macho.nlist_64);
- if (needed_size > self.allocatedSizeLinkedit(symtab.symoff)) {
- // Move the entire symbol table to a new location
- const new_symoff = self.findFreeSpaceLinkedit(needed_size, @alignOf(macho.nlist_64));
- const existing_size = symtab.nsyms * @sizeOf(macho.nlist_64);
-
- assert(new_symoff + existing_size <= self.linkedit_off + self.linkedit_size); // TODO expand LINKEDIT segment.
- log.debug("relocating symbol table from 0x{x}-0x{x} to 0x{x}-0x{x}", .{
- symtab.symoff,
- symtab.symoff + existing_size,
- new_symoff,
- new_symoff + existing_size,
- });
-
- const amt = try self.file.copyRangeAll(symtab.symoff, self.file, new_symoff, existing_size);
- if (amt != existing_size) return error.InputOutput;
- symtab.symoff = @intCast(u32, new_symoff);
- }
- symtab.nsyms = @intCast(u32, nsyms);
- self.load_commands_dirty = true;
- }
-}
-
-pub fn writeLocalSymbol(self: *DebugSymbols, index: usize) !void {
+fn writeLinkeditSegment(self: *DebugSymbols) !void {
const tracy = trace(@src());
defer tracy.end();
- try self.relocateSymbolTable();
+
+ const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
+ seg.inner.filesize = 0;
+
+ try self.writeSymbolTable();
+ try self.writeStringTable();
+}
+
+fn writeSymbolTable(self: *DebugSymbols) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].symtab;
- const off = symtab.symoff + @sizeOf(macho.nlist_64) * index;
- log.debug("writing local symbol {} at 0x{x}", .{ index, off });
- try self.file.pwriteAll(mem.asBytes(&self.base.locals.items[index]), off);
+ symtab.symoff = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
+
+ var locals = std.ArrayList(macho.nlist_64).init(self.base.base.allocator);
+ defer locals.deinit();
+
+ for (self.base.locals.items) |sym| {
+ if (sym.n_strx == 0) continue;
+ if (self.base.symbol_resolver.get(sym.n_strx)) |_| continue;
+ try locals.append(sym);
+ }
+
+ const nlocals = locals.items.len;
+ const nexports = self.base.globals.items.len;
+
+ const locals_off = symtab.symoff;
+ const locals_size = nlocals * @sizeOf(macho.nlist_64);
+ log.debug("writing local symbols from 0x{x} to 0x{x}", .{ locals_off, locals_size + locals_off });
+ try self.file.pwriteAll(mem.sliceAsBytes(locals.items), locals_off);
+
+ const exports_off = locals_off + locals_size;
+ const exports_size = nexports * @sizeOf(macho.nlist_64);
+ log.debug("writing exported symbols from 0x{x} to 0x{x}", .{ exports_off, exports_size + exports_off });
+ try self.file.pwriteAll(mem.sliceAsBytes(self.base.globals.items), exports_off);
+
+ symtab.nsyms = @intCast(u32, nlocals + nexports);
+ seg.inner.filesize += locals_size + exports_size;
+
+ self.load_commands_dirty = true;
}
fn writeStringTable(self: *DebugSymbols) !void {
- if (!self.strtab_dirty) return;
-
const tracy = trace(@src());
defer tracy.end();
+ const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].segment;
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].symtab;
- const allocated_size = self.allocatedSizeLinkedit(symtab.stroff);
- const needed_size = mem.alignForwardGeneric(u64, self.base.strtab.items.len, @alignOf(u64));
+ symtab.stroff = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
+ symtab.strsize = @intCast(u32, mem.alignForwardGeneric(u64, self.base.strtab.items.len, @alignOf(u64)));
+ seg.inner.filesize += symtab.strsize;
- if (needed_size > allocated_size) {
- symtab.strsize = 0;
- symtab.stroff = @intCast(u32, self.findFreeSpaceLinkedit(needed_size, 1));
- }
- symtab.strsize = @intCast(u32, needed_size);
log.debug("writing string table from 0x{x} to 0x{x}", .{ symtab.stroff, symtab.stroff + symtab.strsize });
try self.file.pwriteAll(self.base.strtab.items, symtab.stroff);
+
+ if (symtab.strsize > self.base.strtab.items.len) {
+ // This is potentially the last section, so we need to pad it out.
+ try self.file.pwriteAll(&[_]u8{0}, seg.inner.fileoff + seg.inner.filesize - 1);
+ }
self.load_commands_dirty = true;
- self.strtab_dirty = false;
}
pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const Module.Decl) !void {
@@ -846,14 +853,21 @@ pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const M
const tracy = trace(@src());
defer tracy.end();
+ log.debug("updateDeclLineNumber {s}{*}", .{ decl.name, decl });
+
const func = decl.val.castTag(.function).?.data;
- const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
+ log.debug(" (decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d})", .{
+ decl.src_line,
+ func.lbrace_line,
+ func.rbrace_line,
+ });
+ const line = @intCast(u28, decl.src_line + func.lbrace_line);
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].segment;
const shdr = &dwarf_segment.sections.items[self.debug_line_section_index.?];
const file_pos = shdr.offset + decl.fn_link.macho.off + getRelocDbgLineOff();
var data: [4]u8 = undefined;
- leb.writeUnsignedFixed(4, &data, line_off);
+ leb.writeUnsignedFixed(4, &data, line);
try self.file.pwriteAll(&data, file_pos);
}
@@ -886,7 +900,13 @@ pub fn initDeclDebugBuffers(
try dbg_line_buffer.ensureTotalCapacity(26);
const func = decl.val.castTag(.function).?.data;
- const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
+ log.debug("updateFunc {s}{*}", .{ decl.name, func.owner_decl });
+ log.debug(" (decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d})", .{
+ decl.src_line,
+ func.lbrace_line,
+ func.rbrace_line,
+ });
+ const line = @intCast(u28, decl.src_line + func.lbrace_line);
dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
DW.LNS.extended_op,
@@ -902,7 +922,7 @@ pub fn initDeclDebugBuffers(
// to this function's begin curly.
assert(getRelocDbgLineOff() == dbg_line_buffer.items.len);
// Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later.
- leb.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off);
+ leb.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line);
dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_file);
assert(getRelocDbgFileIndex() == dbg_line_buffer.items.len);
@@ -917,7 +937,7 @@ pub fn initDeclDebugBuffers(
// .debug_info subprogram
const decl_name_with_null = decl.name[0 .. mem.sliceTo(decl.name, 0).len + 1];
- try dbg_info_buffer.ensureUnusedCapacity(27 + decl_name_with_null.len);
+ try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
@@ -945,8 +965,6 @@ pub fn initDeclDebugBuffers(
dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT.name, DW.FORM.string
- mem.writeIntLittle(u32, dbg_info_buffer.addManyAsArrayAssumeCapacity(4), line_off + 1); // DW.AT.decl_line, DW.FORM.data4
- dbg_info_buffer.appendAssumeCapacity(file_index); // DW.AT.decl_file, DW.FORM.data1
},
else => {
// TODO implement .debug_info for global variables
@@ -966,7 +984,6 @@ pub fn commitDeclDebugInfo(
module: *Module,
decl: *Module.Decl,
debug_buffers: *DeclDebugBuffers,
- target: std.Target,
) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1097,14 +1114,26 @@ pub fn commitDeclDebugInfo(
if (dbg_info_buffer.items.len == 0)
return;
+ // We need this for the duration of this function only so that for composite
+ // types such as []const u32, if the type *u32 is non-existent, we create
+ // it synthetically and store the backing bytes in this arena. After we are
+ // done with the relocations, we can safely deinit the entire memory slab.
+ // TODO currently, we do not store the relocations for future use, however,
+ // if that is the case, we should move memory management to a higher scope,
+ // such as linker scope, or whatnot.
+ var dbg_type_arena = std.heap.ArenaAllocator.init(allocator);
+ defer dbg_type_arena.deinit();
+
{
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
- var it = dbg_info_type_relocs.iterator();
- while (it.next()) |entry| {
- entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len);
- try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer, target);
+ var it: usize = 0;
+ while (it < dbg_info_type_relocs.count()) : (it += 1) {
+ const ty = dbg_info_type_relocs.keys()[it];
+ const value_ptr = dbg_info_type_relocs.getPtr(ty).?;
+ value_ptr.off = @intCast(u32, dbg_info_buffer.items.len);
+ try self.addDbgInfoType(dbg_type_arena.allocator(), ty, dbg_info_buffer, dbg_info_type_relocs);
}
}
@@ -1129,24 +1158,25 @@ pub fn commitDeclDebugInfo(
/// Asserts the type has codegen bits.
fn addDbgInfoType(
self: *DebugSymbols,
+ arena: Allocator,
ty: Type,
dbg_info_buffer: *std.ArrayList(u8),
- target: std.Target,
+ dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
) !void {
- _ = self;
+ const target = self.base.base.options.target;
+ var relocs = std.ArrayList(struct { ty: Type, reloc: u32 }).init(arena);
+
switch (ty.zigTypeTag()) {
- .Void => unreachable,
.NoReturn => unreachable,
+ .Void => {
+ try dbg_info_buffer.append(abbrev_pad1);
+ },
.Bool => {
try dbg_info_buffer.appendSlice(&[_]u8{
abbrev_base_type,
DW.ATE.boolean, // DW.AT.encoding , DW.FORM.data1
1, // DW.AT.byte_size, DW.FORM.data1
- 'b',
- 'o',
- 'o',
- 'l',
- 0, // DW.AT.name, DW.FORM.string
+ 'b', 'o', 'o', 'l', 0, // DW.AT.name, DW.FORM.string
});
},
.Int => {
@@ -1163,11 +1193,120 @@ fn addDbgInfoType(
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
},
+ .Optional => {
+ if (ty.isPtrLikeOptional()) {
+ try dbg_info_buffer.ensureUnusedCapacity(12);
+ dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
+ // DW.AT.encoding, DW.FORM.data1
+ dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
+ // DW.AT.byte_size, DW.FORM.data1
+ dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target)));
+ // DW.AT.name, DW.FORM.string
+ try dbg_info_buffer.writer().print("{}\x00", .{ty});
+ } else {
+ log.debug("TODO implement .debug_info for type '{}'", .{ty});
+ try dbg_info_buffer.append(abbrev_pad1);
+ }
+ },
+ .Pointer => {
+ if (ty.isSlice()) {
+ // Slices are anonymous structs: struct { .ptr = *, .len = N }
+ try dbg_info_buffer.ensureUnusedCapacity(23);
+ // DW.AT.structure_type
+ dbg_info_buffer.appendAssumeCapacity(abbrev_anon_struct_type);
+ // DW.AT.byte_size, DW.FORM.sdata
+ dbg_info_buffer.appendAssumeCapacity(16);
+ // DW.AT.member
+ dbg_info_buffer.appendAssumeCapacity(abbrev_struct_member);
+ // DW.AT.name, DW.FORM.string
+ dbg_info_buffer.appendSliceAssumeCapacity("ptr");
+ dbg_info_buffer.appendAssumeCapacity(0);
+ // DW.AT.type, DW.FORM.ref4
+ var index = dbg_info_buffer.items.len;
+ try dbg_info_buffer.resize(index + 4);
+ var buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
+ const ptr_ty = ty.slicePtrFieldType(buf);
+ try relocs.append(.{ .ty = ptr_ty, .reloc = @intCast(u32, index) });
+ // DW.AT.data_member_location, DW.FORM.sdata
+ dbg_info_buffer.appendAssumeCapacity(0);
+ // DW.AT.member
+ dbg_info_buffer.appendAssumeCapacity(abbrev_struct_member);
+ // DW.AT.name, DW.FORM.string
+ dbg_info_buffer.appendSliceAssumeCapacity("len");
+ dbg_info_buffer.appendAssumeCapacity(0);
+ // DW.AT.type, DW.FORM.ref4
+ index = dbg_info_buffer.items.len;
+ try dbg_info_buffer.resize(index + 4);
+ try relocs.append(.{ .ty = Type.initTag(.usize), .reloc = @intCast(u32, index) });
+ // DW.AT.data_member_location, DW.FORM.sdata
+ dbg_info_buffer.appendAssumeCapacity(8);
+ // DW.AT.structure_type delimit children
+ dbg_info_buffer.appendAssumeCapacity(0);
+ } else {
+ try dbg_info_buffer.ensureUnusedCapacity(5);
+ dbg_info_buffer.appendAssumeCapacity(abbrev_ptr_type);
+ // DW.AT.type, DW.FORM.ref4
+ const index = dbg_info_buffer.items.len;
+ try dbg_info_buffer.resize(index + 4);
+ try relocs.append(.{ .ty = ty.childType(), .reloc = @intCast(u32, index) });
+ }
+ },
+ .Struct => blk: {
+ // try dbg_info_buffer.ensureUnusedCapacity(23);
+ // DW.AT.structure_type
+ try dbg_info_buffer.append(abbrev_struct_type);
+ // DW.AT.byte_size, DW.FORM.sdata
+ const abi_size = ty.abiSize(target);
+ try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
+ // DW.AT.name, DW.FORM.string
+ const struct_name = try ty.nameAlloc(arena);
+ try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
+ dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
+ dbg_info_buffer.appendAssumeCapacity(0);
+
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ if (struct_obj.layout == .Packed) {
+ log.debug("TODO implement .debug_info for packed structs", .{});
+ break :blk;
+ }
+
+ const fields = ty.structFields();
+ for (fields.keys()) |field_name, field_index| {
+ const field = fields.get(field_name).?;
+ // DW.AT.member
+ try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
+ dbg_info_buffer.appendAssumeCapacity(abbrev_struct_member);
+ // DW.AT.name, DW.FORM.string
+ dbg_info_buffer.appendSliceAssumeCapacity(field_name);
+ dbg_info_buffer.appendAssumeCapacity(0);
+ // DW.AT.type, DW.FORM.ref4
+ var index = dbg_info_buffer.items.len;
+ try dbg_info_buffer.resize(index + 4);
+ try relocs.append(.{ .ty = field.ty, .reloc = @intCast(u32, index) });
+ // DW.AT.data_member_location, DW.FORM.sdata
+ const field_off = ty.structFieldOffset(field_index, target);
+ try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
+ }
+
+ // DW.AT.structure_type delimit children
+ try dbg_info_buffer.append(0);
+ },
else => {
- std.log.scoped(.compiler).err("TODO implement .debug_info for type '{}'", .{ty});
+ log.debug("TODO implement .debug_info for type '{}'", .{ty});
try dbg_info_buffer.append(abbrev_pad1);
},
}
+
+ for (relocs.items) |rel| {
+ const gop = try dbg_info_type_relocs.getOrPut(self.base.base.allocator, rel.ty);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .off = undefined,
+ .relocs = .{},
+ };
+ }
+ try gop.value_ptr.relocs.append(self.base.base.allocator, rel.reloc);
+ }
}
fn updateDeclDebugInfoAllocation(
From d164865308fcc0870c21da30431caf5d15a4f978 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 14 Feb 2022 12:26:15 -0700
Subject: [PATCH 0183/2031] add missing source file to CMakeLists.txt
---
CMakeLists.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a6edfa04ac..505de972b9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -655,6 +655,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/print_env.zig"
"${CMAKE_SOURCE_DIR}/src/print_targets.zig"
"${CMAKE_SOURCE_DIR}/src/print_zir.zig"
+ "${CMAKE_SOURCE_DIR}/src/register_manager.zig"
"${CMAKE_SOURCE_DIR}/src/stage1.zig"
"${CMAKE_SOURCE_DIR}/src/target.zig"
"${CMAKE_SOURCE_DIR}/src/tracy.zig"
From 1e49d1fca89a0fce1604a8188257fa6ea2749338 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 14 Feb 2022 12:26:55 -0700
Subject: [PATCH 0184/2031] langref: correct info about type info of
declarations
---
doc/langref.html.in | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index fd104db6da..c0b8c9cb48 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9513,9 +9513,14 @@ test "integer truncation" {
Provides type reflection.
- For {#link|structs|struct#}, {#link|unions|union#}, {#link|enums|enum#}, and
- {#link|error sets|Error Set Type#}, the fields are guaranteed to be in the same
- order as declared. For declarations, the order is unspecified.
+ Type information of {#link|structs|struct#}, {#link|unions|union#}, {#link|enums|enum#}, and
+ {#link|error sets|Error Set Type#} has fields which are guaranteed to be in the same
+ order as appearance in the source file.
+
+
+ Type information of {#link|structs|struct#}, {#link|unions|union#}, {#link|enums|enum#}, and
+ {#link|opaques|opaque#} has declarations, which are also guaranteed to be in the same
+ order as appearance in the source file.
{#header_close#}
From 7b938767bb18535a870d0460c9f4d9e3d93ab053 Mon Sep 17 00:00:00 2001
From: ominitay <37453713+ominitay@users.noreply.github.com>
Date: Sun, 30 Jan 2022 13:22:49 +0000
Subject: [PATCH 0185/2031] std.os: throw compile error for `argv` on Windows
On Windows, `argv` is not populated by start code, and instead left as undefined. This is problematic, and can lead to incorrect programs compiling, but panicking when trying to access `argv`. This change causes these programs to produce a compile error on Windows instead, which is far preferable to a runtime panic.
---
lib/std/os.zig | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 16a32766dc..d7f60c1e1a 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -218,9 +218,13 @@ pub const socket_t = if (builtin.os.tag == .windows) windows.ws2_32.SOCKET else
pub var environ: [][*:0]u8 = undefined;
/// Populated by startup code before main().
-/// Not available on Windows. See `std.process.args`
-/// for obtaining the process arguments.
-pub var argv: [][*:0]u8 = undefined;
+/// Not available on WASI or Windows without libc. See `std.process.argsAlloc`
+/// or `std.process.argsWithAllocator` for a cross-platform alternative.
+pub var argv: [][*:0]u8 = if (builtin.link_libc) undefined else switch (builtin.os.tag) {
+ .windows => @compileError("argv isn't supported on Windows: use std.process.argsAlloc instead"),
+ .wasi => @compileError("argv isn't supported on WASI: use std.process.argsAlloc instead"),
+ else => undefined,
+};
/// To obtain errno, call this function with the return value of the
/// system function call. For some systems this will obtain the value directly
From 0d16e908fbb93cdaee80bc2514f76a09736bfa04 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Tue, 1 Feb 2022 18:57:51 +0100
Subject: [PATCH 0186/2031] stage2 AArch64: implement is_err/is_non_err for
simple error unions
---
src/arch/aarch64/CodeGen.zig | 93 ++++++++++++++++++++++++++++--------
1 file changed, 73 insertions(+), 20 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 79fa38e275..fd9211d919 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1104,7 +1104,13 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = error_union_ty.errorUnionPayload();
+ if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
+
+ return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -2008,18 +2014,52 @@ fn isNonNull(self: *Self, operand: MCValue) !MCValue {
return self.fail("TODO call isNull and invert the result", .{});
}
-fn isErr(self: *Self, operand: MCValue) !MCValue {
+fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
_ = operand;
- // Here you can specialize this instruction if it makes sense to, otherwise the default
- // will call isNonNull and invert the result.
- return self.fail("TODO call isNonErr and invert the result", .{});
+
+ const error_type = ty.errorUnionSet();
+ const payload_type = ty.errorUnionPayload();
+
+ if (!error_type.hasRuntimeBits()) {
+ return MCValue{ .immediate = 0 }; // always false
+ } else if (!payload_type.hasRuntimeBits()) {
+ if (error_type.abiSize(self.target.*) <= 8) {
+ const reg_mcv: MCValue = switch (operand) {
+ .register => operand,
+ else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
+ };
+
+ _ = try self.addInst(.{
+ .tag = .cmp_immediate,
+ .data = .{ .rr_imm12_sh = .{
+ .rd = .xzr,
+ .rn = reg_mcv.register,
+ .imm12 = 0,
+ } },
+ });
+
+ return MCValue{ .compare_flags_unsigned = .gt };
+ } else {
+ return self.fail("TODO isErr for errors with size > 8", .{});
+ }
+ } else {
+ return self.fail("TODO isErr for non-empty payloads", .{});
+ }
}
-fn isNonErr(self: *Self, operand: MCValue) !MCValue {
- _ = operand;
- // Here you can specialize this instruction if it makes sense to, otherwise the default
- // will call isNull and invert the result.
- return self.fail("TODO call isErr and invert the result", .{});
+fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
+ const is_err_result = try self.isErr(ty, operand);
+ switch (is_err_result) {
+ .compare_flags_unsigned => |op| {
+ assert(op == .gt);
+ return MCValue{ .compare_flags_unsigned = .lte };
+ },
+ .immediate => |imm| {
+ assert(imm == 0);
+ return MCValue{ .immediate = 1 };
+ },
+ else => unreachable,
+ }
}
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
@@ -2080,7 +2120,8 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
- break :result try self.isErr(operand);
+ const ty = self.air.typeOf(un_op);
+ break :result try self.isErr(ty, operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -2089,6 +2130,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
+ const ptr_ty = self.air.typeOf(un_op);
const operand: MCValue = blk: {
if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
// The MCValue that holds the pointer can be re-used as the value.
@@ -2098,7 +2140,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
}
};
try self.load(operand, operand_ptr, self.air.typeOf(un_op));
- break :result try self.isErr(operand);
+ break :result try self.isErr(ptr_ty.elemType(), operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -2107,7 +2149,8 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
- break :result try self.isNonErr(operand);
+ const ty = self.air.typeOf(un_op);
+ break :result try self.isNonErr(ty, operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -2116,6 +2159,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
+ const ptr_ty = self.air.typeOf(un_op);
const operand: MCValue = blk: {
if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
// The MCValue that holds the pointer can be re-used as the value.
@@ -2125,7 +2169,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
}
};
try self.load(operand, operand_ptr, self.air.typeOf(un_op));
- break :result try self.isNonErr(operand);
+ break :result try self.isNonErr(ptr_ty.elemType(), operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -2864,14 +2908,23 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
.ErrorUnion => {
const error_type = typed_value.ty.errorUnionSet();
const payload_type = typed_value.ty.errorUnionPayload();
- const sub_val = typed_value.val.castTag(.eu_payload).?.data;
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
+ if (typed_value.val.castTag(.eu_payload)) |pl| {
+ if (!payload_type.hasRuntimeBits()) {
+ // We use the error type directly as the type.
+ return MCValue{ .immediate = 0 };
+ }
+
+ _ = pl;
+ return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
+ } else {
+ if (!payload_type.hasRuntimeBits()) {
+ // We use the error type directly as the type.
+ return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
+ }
+
+ return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty});
}
-
- return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty});
},
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
}
From 77cf000438c8f65f089e9c64fbb881a9136f2931 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Tue, 1 Feb 2022 20:26:23 +0100
Subject: [PATCH 0187/2031] stage2 AArch64: implement loading from register
---
src/arch/aarch64/CodeGen.zig | 138 ++++++++++++++++++++++++++++++++++-
src/arch/aarch64/Mir.zig | 2 +-
2 files changed, 137 insertions(+), 3 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index fd9211d919..39eaa77ad5 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1301,8 +1301,64 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.embedded_in_code => {
return self.fail("TODO implement loading from MCValue.embedded_in_code", .{});
},
- .register => {
- return self.fail("TODO implement loading from MCValue.register for {}", .{self.target.cpu.arch});
+ .register => |addr_reg| {
+ self.register_manager.freezeRegs(&.{addr_reg});
+ defer self.register_manager.unfreezeRegs(&.{addr_reg});
+
+ switch (dst_mcv) {
+ .dead => unreachable,
+ .undef => unreachable,
+ .compare_flags_signed, .compare_flags_unsigned => unreachable,
+ .embedded_in_code => unreachable,
+ .stack_offset => |off| {
+ if (elem_ty.abiSize(self.target.*) <= 8) {
+ const tmp_reg = try self.register_manager.allocReg(null);
+ self.register_manager.freezeRegs(&.{tmp_reg});
+ defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+
+ try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
+ try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
+ } else {
+ // TODO optimize the register allocation
+ const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
+ self.register_manager.freezeRegs(®s);
+ defer self.register_manager.unfreezeRegs(®s);
+
+ const src_reg = addr_reg;
+ const dst_reg = regs[0];
+ const len_reg = regs[1];
+ const count_reg = regs[2];
+ const tmp_reg = regs[3];
+
+ // sub dst_reg, fp, #off
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const adj_off = off + elem_size;
+ const offset = math.cast(u12, adj_off) catch return self.fail("TODO load: larger stack offsets", .{});
+ _ = try self.addInst(.{
+ .tag = .sub_immediate,
+ .data = .{ .rr_imm12_sh = .{
+ .rd = dst_reg,
+ .rn = .x29,
+ .imm12 = offset,
+ } },
+ });
+
+ // mov len, #elem_size
+ const len_imm = math.cast(u16, elem_size) catch return self.fail("TODO load: larger stack offsets", .{});
+ _ = try self.addInst(.{
+ .tag = .movk,
+ .data = .{ .r_imm16_sh = .{
+ .rd = len_reg,
+ .imm16 = len_imm,
+ } },
+ });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ }
+ },
+ else => return self.fail("TODO load from register into {}", .{dst_mcv}),
+ }
},
.memory,
.stack_offset,
@@ -1317,6 +1373,84 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
}
+fn genInlineMemcpy(
+ self: *Self,
+ src: Register,
+ dst: Register,
+ len: Register,
+ count: Register,
+ tmp: Register,
+) !void {
+ // movk count, #0
+ _ = try self.addInst(.{
+ .tag = .movk,
+ .data = .{ .r_imm16_sh = .{
+ .rd = count,
+ .imm16 = 0,
+ } },
+ });
+
+ // loop:
+ // cmp count, len
+ _ = try self.addInst(.{
+ .tag = .cmp_shifted_register,
+ .data = .{ .rrr_imm6_shift = .{
+ .rd = .xzr,
+ .rn = count,
+ .rm = len,
+ .imm6 = 0,
+ .shift = .lsl,
+ } },
+ });
+
+ // bge end
+ _ = try self.addInst(.{
+ .tag = .b_cond,
+ .data = .{ .inst_cond = .{
+ .inst = @intCast(u32, self.mir_instructions.len + 5),
+ .cond = .ge,
+ } },
+ });
+
+ // ldrb tmp, [src, count]
+ _ = try self.addInst(.{
+ .tag = .ldrb_register,
+ .data = .{ .load_store_register_register = .{
+ .rt = tmp,
+ .rn = src,
+ .offset = Instruction.LoadStoreOffset.reg(count).register,
+ } },
+ });
+
+ // strb tmp, [dest, count]
+ _ = try self.addInst(.{
+ .tag = .strb_register,
+ .data = .{ .load_store_register_register = .{
+ .rt = tmp,
+ .rn = dst,
+ .offset = Instruction.LoadStoreOffset.reg(count).register,
+ } },
+ });
+
+ // add count, count, #1
+ _ = try self.addInst(.{
+ .tag = .add_immediate,
+ .data = .{ .rr_imm12_sh = .{
+ .rd = count,
+ .rn = count,
+ .imm12 = 1,
+ } },
+ });
+
+ // b loop
+ _ = try self.addInst(.{
+ .tag = .b,
+ .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) },
+ });
+
+ // end:
+}
+
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 5546b32652..cd370c66ed 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -224,7 +224,7 @@ pub const Inst = struct {
},
/// A registers and a stack offset
///
- /// Used by e.g. str_register
+ /// Used by e.g. str_stack
load_store_stack: struct {
rt: Register,
offset: u32,
From 82f91adbb4ef77e8a2b05da923bb1d0c9c3a8262 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 5 Feb 2022 18:45:35 +0100
Subject: [PATCH 0188/2031] stage2 AArch64: Add madd, msub, mul, mneg
instructions
---
src/arch/aarch64/bits.zig | 61 +++++++++++++++++++++++++++++++++++++++
1 file changed, 61 insertions(+)
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index 10eb919cb9..540a055c8e 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -332,6 +332,17 @@ pub const Instruction = union(enum) {
op: u1,
sf: u1,
},
+ data_processing_3_source: packed struct {
+ rd: u5,
+ rn: u5,
+ ra: u5,
+ o0: u1,
+ rm: u5,
+ op31: u3,
+ fixed: u5 = 0b11011,
+ op54: u2,
+ sf: u1,
+ },
pub const Shift = struct {
shift: Type = .lsl,
@@ -470,6 +481,7 @@ pub const Instruction = union(enum) {
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
.compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
.conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
+ .data_processing_3_source => |v| @bitCast(u32, v),
};
}
@@ -967,6 +979,33 @@ pub const Instruction = union(enum) {
};
}
+ fn dataProcessing3Source(
+ op54: u2,
+ op31: u3,
+ o0: u1,
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ ra: Register,
+ ) Instruction {
+ return Instruction{
+ .data_processing_3_source = .{
+ .rd = rd.id(),
+ .rn = rn.id(),
+ .ra = ra.id(),
+ .o0 = o0,
+ .rm = rm.id(),
+ .op31 = op31,
+ .op54 = op54,
+ .sf = switch (rd.size()) {
+ 32 => 0b0,
+ 64 => 0b1,
+ else => unreachable, // unexpected register size
+ },
+ },
+ };
+ }
+
// Helper functions for assembly syntax functions
// Move wide (immediate)
@@ -1245,6 +1284,24 @@ pub const Instruction = union(enum) {
pub fn csneg(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
return conditionalSelect(0b01, 0b1, 0b0, rd, rn, rm, cond);
}
+
+ // Data processing (3 source)
+
+ pub fn madd(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
+ return dataProcessing3Source(0b00, 0b000, 0b0, rd, rn, rm, ra);
+ }
+
+ pub fn msub(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
+ return dataProcessing3Source(0b00, 0b000, 0b1, rd, rn, rm, ra);
+ }
+
+ pub fn mul(rd: Register, rn: Register, rm: Register) Instruction {
+ return madd(rd, rn, rm, .xzr);
+ }
+
+ pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
+ return msub(rd, rn, rm, .xzr);
+ }
};
test {
@@ -1414,6 +1471,10 @@ test "serialize instructions" {
.inst = Instruction.csinc(.x1, .x2, .x4, .eq),
.expected = 0b1_0_0_11010100_00100_0000_0_1_00010_00001,
},
+ .{ // mul x1, x4, x9
+ .inst = Instruction.mul(.x1, .x4, .x9),
+ .expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
+ },
};
for (testcases) |case| {
From 8204ad193788f55b7fb5c5a2dd9a1902d964ed70 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 5 Feb 2022 20:51:11 +0100
Subject: [PATCH 0189/2031] stage2 AArch64: implement slice_len and
slice_elem_val
---
src/arch/aarch64/CodeGen.zig | 191 ++++++++++++++++++++++++++++++++++-
src/arch/aarch64/Emit.zig | 33 ++++--
src/arch/aarch64/Mir.zig | 14 ++-
3 files changed, 225 insertions(+), 13 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 39eaa77ad5..2c6f2b33b7 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1164,7 +1164,20 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_len for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mcv = try self.resolveInst(ty_op.operand);
+ switch (mcv) {
+ .dead, .unreach => unreachable,
+ .register => unreachable, // a slice doesn't fit in one register
+ .stack_offset => |off| {
+ break :result MCValue{ .stack_offset = off + 8 };
+ },
+ .memory => |addr| {
+ break :result MCValue{ .memory = addr + 8 };
+ },
+ else => return self.fail("TODO implement slice_len for {}", .{mcv}),
+ }
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1183,10 +1196,114 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_val for {}", .{self.target.cpu.arch});
+
+ if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+ const result: MCValue = result: {
+ const slice_mcv = try self.resolveInst(bin_op.lhs);
+
+ // TODO optimize for the case where the index is a constant,
+ // i.e. index_mcv == .immediate
+ const index_mcv = try self.resolveInst(bin_op.rhs);
+ const index_is_register = index_mcv == .register;
+
+ const slice_ty = self.air.typeOf(bin_op.lhs);
+ const elem_ty = slice_ty.childType();
+ const elem_size = elem_ty.abiSize(self.target.*);
+
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+
+ if (index_is_register) self.register_manager.freezeRegs(&.{index_mcv.register});
+ defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});
+
+ const base_mcv: MCValue = switch (slice_mcv) {
+ .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off + 8 }) },
+ else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
+ };
+ self.register_manager.freezeRegs(&.{base_mcv.register});
+
+ // TODO implement optimized ldr for airSliceElemVal
+ const dst_mcv = try self.allocRegOrMem(inst, true);
+
+ const offset_mcv = try self.genMulConstant(bin_op.rhs, @intCast(u32, elem_size));
+ assert(offset_mcv == .register); // result of multiplication should always be register
+ self.register_manager.freezeRegs(&.{offset_mcv.register});
+
+ const addr_reg = try self.register_manager.allocReg(null);
+ self.register_manager.freezeRegs(&.{addr_reg});
+ defer self.register_manager.unfreezeRegs(&.{addr_reg});
+
+ _ = try self.addInst(.{
+ .tag = .add_shifted_register,
+ .data = .{ .rrr_imm6_shift = .{
+ .rd = addr_reg,
+ .rn = base_mcv.register,
+ .rm = offset_mcv.register,
+ .imm6 = 0,
+ .shift = .lsl,
+ } },
+ });
+
+ // At this point in time, neither the base register
+ // nor the offset register contains any valuable data
+ // anymore.
+ self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
+
+ try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
+
+ break :result dst_mcv;
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+fn genMulConstant(self: *Self, op: Air.Inst.Ref, imm: u32) !MCValue {
+ const lhs = try self.resolveInst(op);
+ const rhs = MCValue{ .immediate = imm };
+
+ const lhs_is_register = lhs == .register;
+
+ if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+ defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
+
+ // Destination must be a register
+ // LHS must be a register
+ // RHS must be a register
+ var dst_mcv: MCValue = undefined;
+ var lhs_mcv: MCValue = lhs;
+ var rhs_mcv: MCValue = rhs;
+
+ // Allocate registers for operands and/or destination
+ // Allocate 1 or 2 registers
+ if (lhs_is_register) {
+ // Move RHS to register
+ dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null) };
+ rhs_mcv = dst_mcv;
+ } else {
+ // Move LHS and RHS to register
+ const regs = try self.register_manager.allocRegs(2, .{ null, null });
+ lhs_mcv = MCValue{ .register = regs[0] };
+ rhs_mcv = MCValue{ .register = regs[1] };
+ dst_mcv = lhs_mcv;
+ }
+
+ // Move the operands to the newly allocated registers
+ if (!lhs_is_register) {
+ try self.genSetReg(self.air.typeOf(op), lhs_mcv.register, lhs);
+ }
+ try self.genSetReg(Type.initTag(.usize), rhs_mcv.register, rhs);
+
+ _ = try self.addInst(.{
+ .tag = .mul,
+ .data = .{ .rrr = .{
+ .rd = dst_mcv.register,
+ .rn = lhs_mcv.register,
+ .rm = rhs_mcv.register,
+ } },
+ });
+
+ return dst_mcv;
+}
+
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -1310,6 +1427,16 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.undef => unreachable,
.compare_flags_signed, .compare_flags_unsigned => unreachable,
.embedded_in_code => unreachable,
+ .register => |dst_reg| {
+ _ = try self.addInst(.{
+ .tag = .ldr_immediate,
+ .data = .{ .load_store_register_immediate = .{
+ .rt = dst_reg,
+ .rn = addr_reg,
+ .offset = Instruction.LoadStoreOffset.none.immediate,
+ } },
+ });
+ },
.stack_offset => |off| {
if (elem_ty.abiSize(self.target.*) <= 8) {
const tmp_reg = try self.register_manager.allocReg(null);
@@ -2590,8 +2717,61 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (stack_offset == off)
return; // Copy stack variable to itself; nothing to do.
- const reg = try self.copyToTmpRegister(ty, mcv);
- return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ if (ty.abiSize(self.target.*) <= ptr_bytes) {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+ } else {
+ // TODO optimize the register allocation
+ const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null });
+ self.register_manager.freezeRegs(®s);
+ defer self.register_manager.unfreezeRegs(®s);
+
+ const src_reg = regs[0];
+ const dst_reg = regs[1];
+ const len_reg = regs[2];
+ const count_reg = regs[3];
+ const tmp_reg = regs[4];
+
+ // sub src_reg, fp, #off
+ const adj_src_offset = off + @intCast(u32, ty.abiSize(self.target.*));
+ const src_offset = math.cast(u12, adj_src_offset) catch return self.fail("TODO load: larger stack offsets", .{});
+ _ = try self.addInst(.{
+ .tag = .sub_immediate,
+ .data = .{ .rr_imm12_sh = .{
+ .rd = src_reg,
+ .rn = .x29,
+ .imm12 = src_offset,
+ } },
+ });
+
+ // sub dst_reg, fp, #stack_offset
+ const adj_dst_off = stack_offset + @intCast(u32, ty.abiSize(self.target.*));
+ const dst_offset = math.cast(u12, adj_dst_off) catch return self.fail("TODO load: larger stack offsets", .{});
+ _ = try self.addInst(.{
+ .tag = .sub_immediate,
+ .data = .{ .rr_imm12_sh = .{
+ .rd = dst_reg,
+ .rn = .x29,
+ .imm12 = dst_offset,
+ } },
+ });
+
+ // mov len, #elem_size
+ const elem_size = @intCast(u32, ty.abiSize(self.target.*));
+ const len_imm = math.cast(u16, elem_size) catch return self.fail("TODO load: larger stack offsets", .{});
+ _ = try self.addInst(.{
+ .tag = .movk,
+ .data = .{ .r_imm16_sh = .{
+ .rd = len_reg,
+ .imm16 = len_imm,
+ } },
+ });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ }
},
}
}
@@ -2711,7 +2891,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} },
});
},
- else => return self.fail("TODO implement genSetReg other types abi_size={}", .{abi_size}),
+ 3 => return self.fail("TODO implement genSetReg types size 3", .{}),
+ else => unreachable,
}
},
else => return self.fail("TODO implement genSetReg for aarch64 {}", .{mcv}),
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 5b2610f508..140d0664b5 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -91,6 +91,7 @@ pub fn emitMir(
.call_extern => try emit.mirCallExtern(inst),
+ .add_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cset => try emit.mirConditionalSelect(inst),
@@ -132,6 +133,8 @@ pub fn emitMir(
.movk => try emit.mirMoveWideImmediate(inst),
.movz => try emit.mirMoveWideImmediate(inst),
+ .mul => try emit.mirDataProcessing3Source(inst),
+
.nop => try emit.mirNop(),
.push_regs => try emit.mirPushPopRegs(inst),
@@ -201,6 +204,12 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
return 5 * 4;
}
},
+ .pop_regs, .push_regs => {
+ const reg_list = emit.mir.instructions.items(.data)[inst].reg_list;
+ const number_of_regs = @popCount(u32, reg_list);
+ const number_of_insts = std.math.divCeil(u6, number_of_regs, 2) catch unreachable;
+ return number_of_insts * 4;
+ },
.call_extern => return 4,
.dbg_line,
.dbg_epilogue_begin,
@@ -565,15 +574,15 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
+ const rd = rrr_imm6_shift.rd;
+ const rn = rrr_imm6_shift.rn;
+ const rm = rrr_imm6_shift.rm;
+ const shift = rrr_imm6_shift.shift;
+ const imm6 = rrr_imm6_shift.imm6;
switch (tag) {
- .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(
- rrr_imm6_shift.rd,
- rrr_imm6_shift.rn,
- rrr_imm6_shift.rm,
- rrr_imm6_shift.shift,
- rrr_imm6_shift.imm6,
- )),
+ .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)),
+ .add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)),
else => unreachable,
}
}
@@ -802,6 +811,16 @@ fn mirMoveWideImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirDataProcessing3Source(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const rrr = emit.mir.instructions.items(.data)[inst].rrr;
+
+ switch (tag) {
+ .mul => try emit.writeInstruction(Instruction.mul(rrr.rd, rrr.rn, rrr.rm)),
+ else => unreachable,
+ }
+}
+
fn mirNop(emit: *Emit) !void {
try emit.writeInstruction(Instruction.nop());
}
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index cd370c66ed..4f653ff072 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -26,6 +26,8 @@ pub const Inst = struct {
pub const Tag = enum(u16) {
/// Add (immediate)
add_immediate,
+ /// Add (shifted register)
+ add_shifted_register,
/// Branch conditionally
b_cond,
/// Branch
@@ -82,6 +84,8 @@ pub const Inst = struct {
movk,
/// Move wide with zero
movz,
+ /// Multiply
+ mul,
/// No Operation
nop,
/// Pseudo-instruction: Pop multiple registers
@@ -187,6 +191,14 @@ pub const Inst = struct {
imm12: u12,
sh: u1 = 0,
},
+ /// Two registers
+ ///
+ /// Used by e.g. mul
+ rrr: struct {
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ },
/// Three registers and a shift (shift type and 6-bit amount)
///
/// Used by e.g. cmp_shifted_register
@@ -208,7 +220,7 @@ pub const Inst = struct {
},
/// Two registers and a LoadStoreOffsetImmediate
///
- /// Used by e.g. str_register
+ /// Used by e.g. str_immediate
load_store_register_immediate: struct {
rt: Register,
rn: Register,
From f598d2ae056e72bda1efb3bc7d77e8183e95e191 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 5 Feb 2022 21:22:09 +0100
Subject: [PATCH 0190/2031] stage2 AArch64: implement unwrap_errunion_err and
struct_field_ptr
---
src/arch/aarch64/CodeGen.zig | 78 +++++++++++++++++++++++++++---------
1 file changed, 60 insertions(+), 18 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 2c6f2b33b7..bd6b875550 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1098,7 +1098,14 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = error_union_ty.errorUnionPayload();
+ const mcv = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasRuntimeBits()) break :result mcv;
+
+ return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1644,20 +1651,31 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void {
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- return self.structFieldPtr(extra.struct_operand, ty_pl.ty, extra.field_index);
+ const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index);
+ return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- return self.structFieldPtr(ty_op.operand, ty_op.ty, index);
+ const result = try self.structFieldPtr(inst, ty_op.operand, index);
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn structFieldPtr(self: *Self, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !void {
- _ = self;
- _ = operand;
- _ = ty;
- _ = index;
- return self.fail("TODO implement codegen struct_field_ptr", .{});
- //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
+
+fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
+ return if (self.liveness.isUnused(inst)) .dead else result: {
+ const mcv = try self.resolveInst(operand);
+ const struct_ty = self.air.typeOf(operand).childType();
+ const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_ty = struct_ty.structFieldType(index);
+ const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
+ switch (mcv) {
+ .ptr_stack_offset => |off| {
+ break :result MCValue{ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size };
+ },
+ else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
+ }
+ };
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
@@ -1754,10 +1772,16 @@ fn airFence(self: *Self) !void {
fn airCall(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
- const fn_ty = self.air.typeOf(pl_op.operand);
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const ty = self.air.typeOf(callee);
+
+ const fn_ty = switch (ty.zigTypeTag()) {
+ .Fn => ty,
+ .Pointer => ty.childType(),
+ else => unreachable,
+ };
var info = try self.resolveCallingConventionValues(fn_ty);
defer info.deinit(self);
@@ -1821,7 +1845,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
- return self.fail("TODO implement calling runtime known function pointer", .{});
+ assert(ty.zigTypeTag() == .Pointer);
+ const mcv = try self.resolveInst(callee);
+ try self.genSetReg(Type.initTag(.usize), .x30, mcv);
+
+ _ = try self.addInst(.{
+ .tag = .blr,
+ .data = .{ .reg = .x30 },
+ });
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
for (info.args) |mc_arg, arg_i| {
@@ -2008,12 +2039,23 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+
const ty = self.air.typeOf(bin_op.lhs);
- assert(ty.eql(self.air.typeOf(bin_op.rhs)));
- if (ty.zigTypeTag() == .ErrorSet)
- return self.fail("TODO implement cmp for errors", .{});
+
+ if (ty.abiSize(self.target.*) > 8) {
+ return self.fail("TODO cmp for types with size > 8", .{});
+ }
+
+ const signedness: std.builtin.Signedness = blk: {
+ // by default we assume the operand type is unsigned (i.e. bools and enum values)
+ if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+
+ // in case of an actual integer, we emit the correct signedness
+ break :blk ty.intInfo(self.target.*).signedness;
+ };
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -2089,9 +2131,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
else => unreachable,
}
- break :result switch (ty.isSignedInt()) {
- true => MCValue{ .compare_flags_signed = op },
- false => MCValue{ .compare_flags_unsigned = op },
+ break :result switch (signedness) {
+ .signed => MCValue{ .compare_flags_signed = op },
+ .unsigned => MCValue{ .compare_flags_unsigned = op },
};
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
From f47245865eea35fa0b08cb2a87e3620fa904dd88 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Tue, 8 Feb 2022 19:57:01 +0100
Subject: [PATCH 0191/2031] stage2 AArch64: minor refactors in Mir + Emit
---
src/arch/aarch64/CodeGen.zig | 79 ++++++++++++++----------------------
src/arch/aarch64/Emit.zig | 73 +++++++++++++++------------------
src/arch/aarch64/Mir.zig | 24 +++++++----
3 files changed, 78 insertions(+), 98 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index bd6b875550..4e14760286 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -2121,8 +2121,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.immediate => |imm| {
_ = try self.addInst(.{
.tag = .cmp_immediate,
- .data = .{ .rr_imm12_sh = .{
- .rd = .xzr,
+ .data = .{ .r_imm12_sh = .{
.rn = lhs_mcv.register,
.imm12 = @intCast(u12, imm),
} },
@@ -2334,8 +2333,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
_ = try self.addInst(.{
.tag = .cmp_immediate,
- .data = .{ .rr_imm12_sh = .{
- .rd = .xzr,
+ .data = .{ .r_imm12_sh = .{
.rn = reg_mcv.register,
.imm12 = 0,
} },
@@ -2559,7 +2557,16 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
- block_data.mcv = operand_mcv;
+ block_data.mcv = switch (operand_mcv) {
+ .none, .dead, .unreach => unreachable,
+ .register, .stack_offset, .memory => operand_mcv,
+ .immediate => blk: {
+ const new_mcv = try self.allocRegOrMem(block, true);
+ try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+ break :blk new_mcv;
+ },
+ else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
+ };
} else {
try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
}
@@ -2845,10 +2852,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
_ = try self.addInst(.{
.tag = .cset,
- .data = .{ .rrr_cond = .{
+ .data = .{ .r_cond = .{
.rd = reg,
- .rn = .xzr,
- .rm = .xzr,
.cond = condition,
} },
});
@@ -2933,7 +2938,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} },
});
},
- 3 => return self.fail("TODO implement genSetReg types size 3", .{}),
+ 3, 5, 6, 7 => return self.fail("TODO implement genSetReg types size {}", .{abi_size}),
else => unreachable,
}
},
@@ -3114,27 +3119,6 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-/// If the MCValue is an immediate, and it does not fit within this type,
-/// we put it in a register.
-/// A potential opportunity for future optimization here would be keeping track
-/// of the fact that the instruction is available both as an immediate
-/// and as a register.
-fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue {
- const mcv = try self.resolveInst(operand);
- const ti = @typeInfo(T).Int;
- switch (mcv) {
- .immediate => |imm| {
- // This immediate is unsigned.
- const U = std.meta.Int(.unsigned, ti.bits - @boolToInt(ti.signedness == .signed));
- if (imm >= math.maxInt(U)) {
- return MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.usize), mcv) };
- }
- },
- else => {},
- }
- return mcv;
-}
-
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
@@ -3248,19 +3232,11 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
}
},
.ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
+ const err_name = typed_value.val.castTag(.@"error").?.data.name;
+ const module = self.bin_file.options.module.?;
+ const global_error_set = module.global_error_set;
+ const error_index = global_error_set.get(err_name).?;
+ return MCValue{ .immediate = error_index };
},
.ErrorUnion => {
const error_type = typed_value.ty.errorUnionSet();
@@ -3425,13 +3401,18 @@ fn parseRegName(name: []const u8) ?Register {
}
fn registerAlias(reg: Register, size_bytes: u32) Register {
- _ = size_bytes;
-
- return reg;
+ if (size_bytes == 0) {
+ unreachable; // should be comptime known
+ } else if (size_bytes <= 4) {
+ return reg.to32();
+ } else if (size_bytes <= 8) {
+ return reg.to64();
+ } else {
+ unreachable; // TODO handle floating-point registers
+ }
}
-/// For most architectures this does nothing. For x86_64 it resolves any aliased registers
-/// to the 64-bit wide ones.
+/// Resolves any aliased registers to the 64-bit wide ones.
fn toCanonicalReg(reg: Register) Register {
- return reg;
+ return reg.to64();
}
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 140d0664b5..786fa4b3f3 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -423,27 +423,30 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
- const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh;
-
switch (tag) {
- .add_immediate => try emit.writeInstruction(Instruction.add(
- rr_imm12_sh.rd,
- rr_imm12_sh.rn,
- rr_imm12_sh.imm12,
- rr_imm12_sh.sh == 1,
- )),
- .cmp_immediate => try emit.writeInstruction(Instruction.subs(
- rr_imm12_sh.rd,
- rr_imm12_sh.rn,
- rr_imm12_sh.imm12,
- rr_imm12_sh.sh == 1,
- )),
- .sub_immediate => try emit.writeInstruction(Instruction.sub(
- rr_imm12_sh.rd,
- rr_imm12_sh.rn,
- rr_imm12_sh.imm12,
- rr_imm12_sh.sh == 1,
- )),
+ .add_immediate,
+ .sub_immediate,
+ => {
+ const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh;
+ const rd = rr_imm12_sh.rd;
+ const rn = rr_imm12_sh.rn;
+ const imm12 = rr_imm12_sh.imm12;
+ const sh = rr_imm12_sh.sh == 1;
+
+ switch (tag) {
+ .add_immediate => try emit.writeInstruction(Instruction.add(rd, rn, imm12, sh)),
+ .sub_immediate => try emit.writeInstruction(Instruction.sub(rd, rn, imm12, sh)),
+ else => unreachable,
+ }
+ },
+ .cmp_immediate => {
+ const r_imm12_sh = emit.mir.instructions.items(.data)[inst].r_imm12_sh;
+ const rn = r_imm12_sh.rn;
+ const imm12 = r_imm12_sh.imm12;
+ const sh = r_imm12_sh.sh == 1;
+
+ try emit.writeInstruction(Instruction.subs(.xzr, rn, imm12, sh));
+ },
else => unreachable,
}
}
@@ -589,15 +592,11 @@ fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
- const rrr_cond = emit.mir.instructions.items(.data)[inst].rrr_cond;
-
switch (tag) {
- .cset => try emit.writeInstruction(Instruction.csinc(
- rrr_cond.rd,
- rrr_cond.rn,
- rrr_cond.rm,
- rrr_cond.cond,
- )),
+ .cset => {
+ const r_cond = emit.mir.instructions.items(.data)[inst].r_cond;
+ try emit.writeInstruction(Instruction.csinc(r_cond.rd, .xzr, .xzr, r_cond.cond));
+ },
else => unreachable,
}
}
@@ -662,20 +661,14 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirLoadStoreRegisterPair(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const load_store_register_pair = emit.mir.instructions.items(.data)[inst].load_store_register_pair;
+ const rt = load_store_register_pair.rt;
+ const rt2 = load_store_register_pair.rt2;
+ const rn = load_store_register_pair.rn;
+ const offset = load_store_register_pair.offset;
switch (tag) {
- .stp => try emit.writeInstruction(Instruction.stp(
- load_store_register_pair.rt,
- load_store_register_pair.rt2,
- load_store_register_pair.rn,
- load_store_register_pair.offset,
- )),
- .ldp => try emit.writeInstruction(Instruction.ldp(
- load_store_register_pair.rt,
- load_store_register_pair.rt2,
- load_store_register_pair.rn,
- load_store_register_pair.offset,
- )),
+ .stp => try emit.writeInstruction(Instruction.stp(rt, rt2, rn, offset)),
+ .ldp => try emit.writeInstruction(Instruction.ldp(rt, rt2, rn, offset)),
else => unreachable,
}
}
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 4f653ff072..6bb681acb8 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -175,6 +175,13 @@ pub const Inst = struct {
imm16: u16,
hw: u2 = 0,
},
+ /// A register and a condition
+ ///
+ /// Used by e.g. cset
+ r_cond: struct {
+ rd: Register,
+ cond: bits.Instruction.Condition,
+ },
/// Two registers
///
/// Used by e.g. mov_register
@@ -182,6 +189,14 @@ pub const Inst = struct {
rd: Register,
rn: Register,
},
+ /// A register, an unsigned 12-bit immediate, and an optional shift
+ ///
+ /// Used by e.g. cmp_immediate
+ r_imm12_sh: struct {
+ rn: Register,
+ imm12: u12,
+ sh: u1 = 0,
+ },
/// Two registers, an unsigned 12-bit immediate, and an optional shift
///
/// Used by e.g. sub_immediate
@@ -209,15 +224,6 @@ pub const Inst = struct {
imm6: u6,
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
},
- /// Three registers and a condition
- ///
- /// Used by e.g. cset
- rrr_cond: struct {
- rd: Register,
- rn: Register,
- rm: Register,
- cond: bits.Instruction.Condition,
- },
/// Two registers and a LoadStoreOffsetImmediate
///
/// Used by e.g. str_immediate
From 8bfc4b2f9cf4798fe7731288c652145803ef1998 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Tue, 8 Feb 2022 20:15:09 +0100
Subject: [PATCH 0192/2031] stage2 AArch64: extract store out of airStore for
recursive calls
---
src/arch/aarch64/CodeGen.zig | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 4e14760286..8e5e620658 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1611,11 +1611,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airStore(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr = try self.resolveInst(bin_op.lhs);
- const value = try self.resolveInst(bin_op.rhs);
- const elem_ty = self.air.typeOf(bin_op.rhs);
+fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -1624,13 +1620,13 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void {
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.immediate => |imm| {
- try self.setRegOrMem(elem_ty, .{ .memory = imm }, value);
+ try self.setRegOrMem(value_ty, .{ .memory = imm }, value);
},
.ptr_stack_offset => |off| {
- try self.genSetStack(elem_ty, off, value);
+ try self.genSetStack(value_ty, off, value);
},
.ptr_embedded_in_code => |off| {
- try self.setRegOrMem(elem_ty, .{ .embedded_in_code = off }, value);
+ try self.setRegOrMem(value_ty, .{ .embedded_in_code = off }, value);
},
.embedded_in_code => {
return self.fail("TODO implement storing to MCValue.embedded_in_code", .{});
@@ -1638,13 +1634,24 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void {
.register => {
return self.fail("TODO implement storing to MCValue.register", .{});
},
- .memory => {
- return self.fail("TODO implement storing to MCValue.memory", .{});
- },
- .stack_offset => {
- return self.fail("TODO implement storing to MCValue.stack_offset", .{});
+ .memory,
+ .stack_offset,
+ => {
+ const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr);
+ try self.store(.{ .register = addr_reg }, value, ptr_ty, value_ty);
},
}
+}
+
+fn airStore(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const ptr = try self.resolveInst(bin_op.lhs);
+ const value = try self.resolveInst(bin_op.rhs);
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const value_ty = self.air.typeOf(bin_op.rhs);
+
+ try self.store(ptr, value, ptr_ty, value_ty);
+
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}
From edb2a75982a011dc883678ca57efa8c3f6be5466 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Wed, 9 Feb 2022 23:19:28 +0100
Subject: [PATCH 0193/2031] stage2 AArch64: Implement binOp for add, sub
---
src/arch/aarch64/CodeGen.zig | 254 +++++++++++++++++++++++++++++++++--
src/arch/aarch64/Emit.zig | 4 +-
src/arch/aarch64/Mir.zig | 2 +
3 files changed, 249 insertions(+), 11 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 8e5e620658..7c207039b9 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -511,10 +511,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
- .add, .ptr_add => try self.airAdd(inst),
+ .add, .ptr_add => try self.airBinOp(inst),
.addwrap => try self.airAddWrap(inst),
.add_sat => try self.airAddSat(inst),
- .sub, .ptr_sub => try self.airSub(inst),
+ .sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airSubWrap(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airMul(inst),
@@ -950,9 +950,249 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
+/// Don't call this function directly. Use binOp instead.
+///
+/// Calling this function signals an intention to generate a Mir
+/// instruction of the form
+///
+/// op dest, lhs, rhs
+///
+/// Asserts that generating an instruction of that form is possible.
+fn binOpRegister(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ maybe_inst: ?Air.Inst.Index,
+ lhs: MCValue,
+ rhs: MCValue,
+ lhs_ty: Type,
+ rhs_ty: Type,
+) !MCValue {
+ const lhs_is_register = lhs == .register;
+ const rhs_is_register = rhs == .register;
+
+ if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+ if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+
+ const lhs_reg = if (lhs_is_register) lhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ break :inst Air.refToIndex(bin_op.lhs).?;
+ } else null;
+ const reg = try self.register_manager.allocReg(track_inst);
+ self.register_manager.freezeRegs(&.{reg});
+ break :blk reg;
+ };
+ defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+
+ const rhs_reg = if (rhs_is_register) rhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ break :inst Air.refToIndex(bin_op.rhs).?;
+ } else null;
+ const reg = try self.register_manager.allocReg(track_inst);
+ self.register_manager.freezeRegs(&.{reg});
+ break :blk reg;
+ };
+ defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+
+ const dest_reg = if (maybe_inst) |inst| blk: {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+
+ if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
+ break :blk lhs_reg;
+ } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
+ break :blk rhs_reg;
+ } else {
+ break :blk try self.register_manager.allocReg(inst);
+ }
+ } else try self.register_manager.allocReg(null);
+
+ if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+ if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
+
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .add => .add_shifted_register,
+ .sub => .sub_shifted_register,
+ else => unreachable,
+ };
+ const mir_data: Mir.Inst.Data = switch (tag) {
+ .add,
+ .sub,
+ => .{ .rrr_imm6_shift = .{
+ .rd = dest_reg,
+ .rn = lhs_reg,
+ .rm = rhs_reg,
+ .imm6 = 0,
+ .shift = .lsl,
+ } },
+ else => unreachable,
+ };
+
+ _ = try self.addInst(.{
+ .tag = mir_tag,
+ .data = mir_data,
+ });
+
+ return MCValue{ .register = dest_reg };
+}
+
+/// Don't call this function directly. Use binOp instead.
+///
+/// Calling this function signals an intention to generate a Mir
+/// instruction of the form
+///
+/// op dest, lhs, #rhs_imm
+///
+/// Set lhs_and_rhs_swapped to true iff inst.bin_op.lhs corresponds to
+/// rhs and vice versa. This parameter is only used when maybe_inst !=
+/// null.
+///
+/// Asserts that generating an instruction of that form is possible.
+fn binOpImmediate(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ maybe_inst: ?Air.Inst.Index,
+ lhs: MCValue,
+ rhs: MCValue,
+ lhs_ty: Type,
+ lhs_and_rhs_swapped: bool,
+) !MCValue {
+ const lhs_is_register = lhs == .register;
+
+ if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+
+ const lhs_reg = if (lhs_is_register) lhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ break :inst Air.refToIndex(
+ if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
+ ).?;
+ } else null;
+ const reg = try self.register_manager.allocReg(track_inst);
+ self.register_manager.freezeRegs(&.{reg});
+ break :blk reg;
+ };
+ defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+
+ const dest_reg = if (maybe_inst) |inst| blk: {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+
+ if (lhs_is_register and self.reuseOperand(
+ inst,
+ if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
+ if (lhs_and_rhs_swapped) 1 else 0,
+ lhs,
+ )) {
+ break :blk lhs_reg;
+ } else {
+ break :blk try self.register_manager.allocReg(inst);
+ }
+ } else try self.register_manager.allocReg(null);
+
+ if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .add => .add_immediate,
+ .sub => .sub_immediate,
+ else => unreachable,
+ };
+ const mir_data: Mir.Inst.Data = switch (tag) {
+ .add,
+ .sub,
+ => .{ .rr_imm12_sh = .{
+ .rd = dest_reg,
+ .rn = lhs_reg,
+ .imm12 = @intCast(u12, rhs.immediate),
+ } },
+ else => unreachable,
+ };
+
+ _ = try self.addInst(.{
+ .tag = mir_tag,
+ .data = mir_data,
+ });
+
+ return MCValue{ .register = dest_reg };
+}
+
+/// For all your binary operation needs, this function will generate
+/// the corresponding Mir instruction(s). Returns the location of the
+/// result.
+///
+/// If the binary operation itself happens to be an Air instruction,
+/// pass the corresponding index in the inst parameter. That helps
+/// this function do stuff like reusing operands.
+///
+/// This function does not do any lowering to Mir itself, but instead
+/// looks at the lhs and rhs and determines which kind of lowering
+/// would be best suitable and then delegates the lowering to other
+/// functions.
+fn binOp(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ maybe_inst: ?Air.Inst.Index,
+ lhs: MCValue,
+ rhs: MCValue,
+ lhs_ty: Type,
+ rhs_ty: Type,
+) !MCValue {
+ switch (tag) {
+ .add,
+ .sub,
+ => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO binary operations on floats", .{}),
+ .Vector => return self.fail("TODO binary operations on vectors", .{}),
+ .Int => {
+ assert(lhs_ty.eql(rhs_ty));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 64) {
+ // Only say yes if the operation is
+ // commutative, i.e. we can swap both of the
+ // operands
+ const lhs_immediate_ok = switch (tag) {
+ .add => lhs == .immediate and lhs.immediate <= std.math.maxInt(u12),
+ .sub => false,
+ else => unreachable,
+ };
+ const rhs_immediate_ok = switch (tag) {
+ .add,
+ .sub,
+ => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12),
+ else => unreachable,
+ };
+
+ if (rhs_immediate_ok) {
+ return try self.binOpImmediate(tag, maybe_inst, lhs, rhs, lhs_ty, false);
+ } else if (lhs_immediate_ok) {
+ // swap lhs and rhs
+ return try self.binOpImmediate(tag, maybe_inst, rhs, lhs, rhs_ty, true);
+ } else {
+ return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+ }
+ } else {
+ return self.fail("TODO binary operations on int with bits > 64", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
+ .ptr_add,
+ .ptr_sub,
+ => return self.fail("TODO ptr_add, ptr_sub", .{}),
+ else => unreachable,
+ }
+}
+
+fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
+ const tag = self.air.instructions.items(.tag)[inst];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add for {}", .{self.target.cpu.arch});
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const lhs_ty = self.air.typeOf(bin_op.lhs);
+ const rhs_ty = self.air.typeOf(bin_op.rhs);
+
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -968,12 +1208,6 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airSub(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch});
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 786fa4b3f3..1b30c78562 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -93,6 +93,7 @@ pub fn emitMir(
.add_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
+ .sub_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cset => try emit.mirConditionalSelect(inst),
@@ -584,8 +585,9 @@ fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const imm6 = rrr_imm6_shift.imm6;
switch (tag) {
- .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)),
.add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)),
+ .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)),
+ .sub_shifted_register => try emit.writeInstruction(Instruction.subShiftedRegister(rd, rn, rm, shift, imm6)),
else => unreachable,
}
}
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 6bb681acb8..d2263c6e32 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -116,6 +116,8 @@ pub const Inst = struct {
strh_register,
/// Subtract (immediate)
sub_immediate,
+ /// Subtract (shifted register)
+ sub_shifted_register,
/// Supervisor Call
svc,
};
From 3a33f313347f3fa151ba3a90c1c3b14eee3d1d1e Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 12 Feb 2022 14:31:25 +0100
Subject: [PATCH 0194/2031] stage2 AArch64: implement cond_br for other
MCValues
---
src/arch/aarch64/CodeGen.zig | 32 ++++++++++++++++++------
src/arch/aarch64/Emit.zig | 47 +++++++++++++++++++++++++++++++-----
src/arch/aarch64/Mir.zig | 9 +++++++
test/stage2/aarch64.zig | 25 +++++++++++++++++++
4 files changed, 99 insertions(+), 14 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 7c207039b9..fade038eb0 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -923,12 +923,12 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
};
break :result r;
},
- else => {},
+ else => {
+ return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch});
+ },
}
-
- return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch});
};
- _ = result;
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airMin(self: *Self, inst: Air.Inst.Index) !void {
@@ -1411,7 +1411,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
.dead, .unreach => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_offset => |off| {
- break :result MCValue{ .stack_offset = off + 8 };
+ break :result MCValue{ .stack_offset = off };
},
.memory => |addr| {
break :result MCValue{ .memory = addr + 8 };
@@ -2425,7 +2425,22 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
},
},
}),
- else => return self.fail("TODO implement condbr when condition is {s}", .{@tagName(cond)}),
+ else => blk: {
+ const reg = switch (cond) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.bool, cond),
+ };
+
+ break :blk try self.addInst(.{
+ .tag = .cbz,
+ .data = .{
+ .r_inst = .{
+ .rt = reg,
+ .inst = undefined, // populated later through performReloc
+ },
+ },
+ });
+ },
};
// Capture the state of register and stack allocation state so that we can revert to it.
@@ -2770,8 +2785,9 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
const tag = self.mir_instructions.items(.tag)[inst];
switch (tag) {
- .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Air.Inst.Index, self.mir_instructions.len),
- .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Air.Inst.Index, self.mir_instructions.len),
+ .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+ .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+ .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
else => unreachable,
}
}
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 1b30c78562..1772f08aa8 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -50,11 +50,13 @@ const InnerError = error{
};
const BranchType = enum {
+ cbz,
b_cond,
unconditional_branch_immediate,
fn default(tag: Mir.Inst.Tag) BranchType {
return switch (tag) {
+ .cbz => .cbz,
.b, .bl => .unconditional_branch_immediate,
.b_cond => .b_cond,
else => unreachable,
@@ -83,6 +85,8 @@ pub fn emitMir(
.b => try emit.mirBranch(inst),
.bl => try emit.mirBranch(inst),
+ .cbz => try emit.mirCompareAndBranch(inst),
+
.blr => try emit.mirUnconditionalBranchRegister(inst),
.ret => try emit.mirUnconditionalBranchRegister(inst),
@@ -160,15 +164,22 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
assert(offset & 0b11 == 0);
switch (tag) {
+ .cbz => {
+ if (std.math.cast(i19, @shrExact(offset, 2))) |_| {
+ return BranchType.cbz;
+ } else |_| {
+ return emit.fail("TODO support cbz branches larger than +-1 MiB", .{});
+ }
+ },
.b, .bl => {
- if (std.math.cast(i26, offset >> 2)) |_| {
+ if (std.math.cast(i26, @shrExact(offset, 2))) |_| {
return BranchType.unconditional_branch_immediate;
} else |_| {
- return emit.fail("TODO support branches larger than +-128 MiB", .{});
+ return emit.fail("TODO support unconditional branches larger than +-128 MiB", .{});
}
},
.b_cond => {
- if (std.math.cast(i19, offset >> 2)) |_| {
+ if (std.math.cast(i19, @shrExact(offset, 2))) |_| {
return BranchType.b_cond;
} else |_| {
return emit.fail("TODO support conditional branches larger than +-1 MiB", .{});
@@ -183,8 +194,10 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
if (isBranch(tag)) {
switch (emit.branch_types.get(inst).?) {
- .unconditional_branch_immediate => return 4,
- .b_cond => return 4,
+ .cbz,
+ .unconditional_branch_immediate,
+ .b_cond,
+ => return 4,
}
}
@@ -222,7 +235,11 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
fn isBranch(tag: Mir.Inst.Tag) bool {
return switch (tag) {
- .b, .bl, .b_cond => true,
+ .cbz,
+ .b,
+ .bl,
+ .b_cond,
+ => true,
else => false,
};
}
@@ -231,6 +248,7 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
const tag = emit.mir.instructions.items(.tag)[inst];
switch (tag) {
+ .cbz => return emit.mir.instructions.items(.data)[inst].r_inst.inst,
.b, .bl => return emit.mir.instructions.items(.data)[inst].inst,
.b_cond => return emit.mir.instructions.items(.data)[inst].inst_cond.inst,
else => unreachable,
@@ -494,6 +512,23 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const r_inst = emit.mir.instructions.items(.data)[inst].r_inst;
+
+ const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) - @intCast(i64, emit.code.items.len);
+ const branch_type = emit.branch_types.get(inst).?;
+ log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset });
+
+ switch (branch_type) {
+ .cbz => switch (tag) {
+ .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))),
+ else => unreachable,
+ },
+ else => unreachable,
+ }
+}
+
fn mirUnconditionalBranchRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const reg = emit.mir.instructions.items(.data)[inst].reg;
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index d2263c6e32..65b80549a9 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -40,6 +40,8 @@ pub const Inst = struct {
brk,
/// Pseudo-instruction: Call extern
call_extern,
+ /// Compare and Branch on Zero
+ cbz,
/// Compare (immediate)
cmp_immediate,
/// Compare (shifted register)
@@ -184,6 +186,13 @@ pub const Inst = struct {
rd: Register,
cond: bits.Instruction.Condition,
},
+ /// A register and another instruction
+ ///
+ /// Used by e.g. cbz
+ r_inst: struct {
+ rt: Register,
+ inst: Index,
+ },
/// Two registers
///
/// Used by e.g. mov_register
diff --git a/test/stage2/aarch64.zig b/test/stage2/aarch64.zig
index 64e6f95a84..580a375aef 100644
--- a/test/stage2/aarch64.zig
+++ b/test/stage2/aarch64.zig
@@ -102,6 +102,31 @@ pub fn addCases(ctx: *TestContext) !void {
,
"Hello, World!\n",
);
+
+ case.addCompareOutput(
+ \\pub fn main() void {
+ \\ foo(true);
+ \\}
+ \\
+ \\fn foo(x: bool) void {
+ \\ if (x) {
+ \\ print();
+ \\ }
+ \\}
+ \\
+ \\fn print() void {
+ \\ asm volatile ("svc #0"
+ \\ :
+ \\ : [number] "{x8}" (64),
+ \\ [arg1] "{x0}" (1),
+ \\ [arg2] "{x1}" (@ptrToInt("Hello, World!\n")),
+ \\ [arg3] "{x2}" ("Hello, World!\n".len),
+ \\ : "memory", "cc"
+ \\ );
+ \\}
+ ,
+ "Hello, World!\n",
+ );
}
// macOS tests
From 1c37622659f70115b698b5924472c2268bca63a8 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 12 Feb 2022 18:37:11 +0100
Subject: [PATCH 0195/2031] stage2 AArch64: Implement not for booleans
---
src/arch/aarch64/CodeGen.zig | 32 +++++++++-
src/arch/aarch64/Emit.zig | 34 ++++++++--
src/arch/aarch64/Mir.zig | 24 +++++++
src/arch/aarch64/bits.zig | 120 ++++++++++++++++++++++-------------
4 files changed, 162 insertions(+), 48 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index fade038eb0..8bd5324ed1 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -894,6 +894,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
+ const operand_ty = self.air.typeOf(ty_op.operand);
switch (operand) {
.dead => unreachable,
.unreach => unreachable,
@@ -924,7 +925,14 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
break :result r;
},
else => {
- return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch});
+ switch (operand_ty.zigTypeTag()) {
+ .Bool => {
+ // TODO convert this to mvn + and
+ const dest = try self.binOp(.xor, null, operand, .{ .immediate = 1 }, operand_ty, Type.bool);
+ break :result dest;
+ },
+ else => return self.fail("TODO bitwise not", .{}),
+ }
},
}
};
@@ -1013,6 +1021,7 @@ fn binOpRegister(
const mir_tag: Mir.Inst.Tag = switch (tag) {
.add => .add_shifted_register,
.sub => .sub_shifted_register,
+ .xor => .eor_shifted_register,
else => unreachable,
};
const mir_data: Mir.Inst.Data = switch (tag) {
@@ -1025,6 +1034,13 @@ fn binOpRegister(
.imm6 = 0,
.shift = .lsl,
} },
+ .xor => .{ .rrr_imm6_logical_shift = .{
+ .rd = dest_reg,
+ .rn = lhs_reg,
+ .rm = rhs_reg,
+ .imm6 = 0,
+ .shift = .lsl,
+ } },
else => unreachable,
};
@@ -1137,6 +1153,7 @@ fn binOp(
rhs_ty: Type,
) !MCValue {
switch (tag) {
+ // Arithmetic operations on integers and floats
.add,
.sub,
=> {
@@ -1177,6 +1194,19 @@ fn binOp(
else => unreachable,
}
},
+ // Bitwise operations on integers
+ .xor => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO binary operations on vectors", .{}),
+ .Int => return self.fail("TODO binary operations on integers", .{}),
+ .Bool => {
+ assert(lhs_ty.eql(rhs_ty));
+ // TODO boolean operations with immediates
+ return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+ },
+ else => unreachable,
+ }
+ },
.ptr_add,
.ptr_sub,
=> return self.fail("TODO ptr_add, ptr_sub", .{}),
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 1772f08aa8..bc37cb56c6 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -106,6 +106,8 @@ pub fn emitMir(
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
+ .eor_shifted_register => try emit.mirLogicalShiftedRegister(inst),
+
.load_memory => try emit.mirLoadMemory(inst),
.ldp => try emit.mirLoadStoreRegisterPair(inst),
@@ -134,6 +136,7 @@ pub fn emitMir(
.mov_register => try emit.mirMoveRegister(inst),
.mov_to_from_sp => try emit.mirMoveRegister(inst),
+ .mvn => try emit.mirMoveRegister(inst),
.movk => try emit.mirMoveWideImmediate(inst),
.movz => try emit.mirMoveWideImmediate(inst),
@@ -638,6 +641,21 @@ fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirLogicalShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const rrr_imm6_logical_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_logical_shift;
+ const rd = rrr_imm6_logical_shift.rd;
+ const rn = rrr_imm6_logical_shift.rn;
+ const rm = rrr_imm6_logical_shift.rm;
+ const shift = rrr_imm6_logical_shift.shift;
+ const imm6 = rrr_imm6_logical_shift.imm6;
+
+ switch (tag) {
+ .eor_shifted_register => try emit.writeInstruction(Instruction.eor(rd, rn, rm, shift, imm6)),
+ else => unreachable,
+ }
+}
+
fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
assert(emit.mir.instructions.items(.tag)[inst] == .load_memory);
const payload = emit.mir.instructions.items(.data)[inst].payload;
@@ -821,11 +839,19 @@ fn mirLoadStoreRegisterRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
- const rr = emit.mir.instructions.items(.data)[inst].rr;
-
switch (tag) {
- .mov_register => try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, Instruction.Shift.none)),
- .mov_to_from_sp => try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false)),
+ .mov_register => {
+ const rr = emit.mir.instructions.items(.data)[inst].rr;
+ try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, .lsl, 0));
+ },
+ .mov_to_from_sp => {
+ const rr = emit.mir.instructions.items(.data)[inst].rr;
+ try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false));
+ },
+ .mvn => {
+ const rr_imm6_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_shift;
+ try emit.writeInstruction(Instruction.orn(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, .lsl, 0));
+ },
else => unreachable,
}
}
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 65b80549a9..92b0604347 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -54,6 +54,8 @@ pub const Inst = struct {
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
+ /// Bitwise Exclusive OR (shifted register)
+ eor_shifted_register,
/// Pseudo-instruction: Load memory
///
/// Payload is `LoadMemory`
@@ -88,6 +90,8 @@ pub const Inst = struct {
movz,
/// Multiply
mul,
+ /// Bitwise NOT
+ mvn,
/// No Operation
nop,
/// Pseudo-instruction: Pop multiple registers
@@ -217,6 +221,15 @@ pub const Inst = struct {
imm12: u12,
sh: u1 = 0,
},
+ /// Two registers and a shift (shift type and 6-bit amount)
+ ///
+ /// Used by e.g. mvn
+ rr_imm6_shift: struct {
+ rd: Register,
+ rm: Register,
+ imm6: u6,
+ shift: bits.Instruction.AddSubtractShiftedRegisterShift,
+ },
/// Two registers
///
/// Used by e.g. mul
@@ -235,6 +248,17 @@ pub const Inst = struct {
imm6: u6,
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
},
+ /// Three registers and a shift (logical instruction version)
+ /// (shift type and 6-bit amount)
+ ///
+ /// Used by e.g. eor_shifted_register
+ rrr_imm6_logical_shift: struct {
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ imm6: u6,
+ shift: bits.Instruction.LogicalShiftedRegisterShift,
+ },
/// Two registers and a LoadStoreOffsetImmediate
///
/// Used by e.g. str_immediate
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index 540a055c8e..a5d56cfcc7 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -344,23 +344,6 @@ pub const Instruction = union(enum) {
sf: u1,
},
- pub const Shift = struct {
- shift: Type = .lsl,
- amount: u6 = 0,
-
- pub const Type = enum(u2) {
- lsl,
- lsr,
- asr,
- ror,
- };
-
- pub const none = Shift{
- .shift = .lsl,
- .amount = 0,
- };
- };
-
pub const Condition = enum(u4) {
/// Integer: Equal
/// Floating point: Equal
@@ -819,25 +802,28 @@ pub const Instruction = union(enum) {
};
}
+ pub const LogicalShiftedRegisterShift = enum(u2) { lsl, lsr, asr, ror };
+
fn logicalShiftedRegister(
opc: u2,
n: u1,
- shift: Shift,
rd: Register,
rn: Register,
rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
) Instruction {
switch (rd.size()) {
32 => {
- assert(shift.amount < 32);
+ assert(amount < 32);
return Instruction{
.logical_shifted_register = .{
.rd = rd.id(),
.rn = rn.id(),
- .imm6 = shift.amount,
+ .imm6 = amount,
.rm = rm.id(),
.n = n,
- .shift = @enumToInt(shift.shift),
+ .shift = @enumToInt(shift),
.opc = opc,
.sf = 0b0,
},
@@ -848,10 +834,10 @@ pub const Instruction = union(enum) {
.logical_shifted_register = .{
.rd = rd.id(),
.rn = rn.id(),
- .imm6 = shift.amount,
+ .imm6 = amount,
.rm = rm.id(),
.n = n,
- .shift = @enumToInt(shift.shift),
+ .shift = @enumToInt(shift),
.opc = opc,
.sf = 0b1,
},
@@ -1159,36 +1145,84 @@ pub const Instruction = union(enum) {
// Logical (shifted register)
- pub fn @"and"(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b00, 0b0, shift, rd, rn, rm);
+ pub fn @"and"(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
}
- pub fn bic(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b00, 0b1, shift, rd, rn, rm);
+ pub fn bic(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
}
- pub fn orr(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b01, 0b0, shift, rd, rn, rm);
+ pub fn orr(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
}
- pub fn orn(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b01, 0b1, shift, rd, rn, rm);
+ pub fn orn(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
}
- pub fn eor(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b10, 0b0, shift, rd, rn, rm);
+ pub fn eor(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
}
- pub fn eon(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b10, 0b1, shift, rd, rn, rm);
+ pub fn eon(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
}
- pub fn ands(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b11, 0b0, shift, rd, rn, rm);
+ pub fn ands(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
}
- pub fn bics(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
- return logicalShiftedRegister(0b11, 0b1, shift, rd, rn, rm);
+ pub fn bics(
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ shift: LogicalShiftedRegisterShift,
+ amount: u6,
+ ) Instruction {
+ return logicalShiftedRegister(0b11, 0b1, rd, rn, rm, shift, amount);
}
// Add/subtract (immediate)
@@ -1316,11 +1350,11 @@ test "serialize instructions" {
const testcases = [_]Testcase{
.{ // orr x0, xzr, x1
- .inst = Instruction.orr(.x0, .xzr, .x1, Instruction.Shift.none),
+ .inst = Instruction.orr(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
},
.{ // orn x0, xzr, x1
- .inst = Instruction.orn(.x0, .xzr, .x1, Instruction.Shift.none),
+ .inst = Instruction.orn(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
},
.{ // movz x1, #4
@@ -1440,11 +1474,11 @@ test "serialize instructions" {
.expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
},
.{ // and x0, x4, x2
- .inst = Instruction.@"and"(.x0, .x4, .x2, .{}),
+ .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0),
.expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
},
.{ // and x0, x4, x2, lsl #0x8
- .inst = Instruction.@"and"(.x0, .x4, .x2, .{ .shift = .lsl, .amount = 0x8 }),
+ .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0x8),
.expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
},
.{ // add x0, x10, #10
From 783e216e7d49ce30032cd768ca266f5f08773bf4 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sat, 12 Feb 2022 21:12:18 +0100
Subject: [PATCH 0196/2031] stage2 AArch64: Fix issue in binOp and add
regression test
---
src/arch/aarch64/CodeGen.zig | 18 ++++++++++-
test/stage2/aarch64.zig | 62 +++++++++++++++++++++++++-----------
2 files changed, 61 insertions(+), 19 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 8bd5324ed1..8b5503f293 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -981,13 +981,19 @@ fn binOpRegister(
if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+ const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.lhs).?;
} else null;
+
const reg = try self.register_manager.allocReg(track_inst);
self.register_manager.freezeRegs(&.{reg});
+
+ if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
break :blk reg;
};
defer self.register_manager.unfreezeRegs(&.{lhs_reg});
@@ -997,8 +1003,12 @@ fn binOpRegister(
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.rhs).?;
} else null;
+
const reg = try self.register_manager.allocReg(track_inst);
self.register_manager.freezeRegs(&.{reg});
+
+ if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
break :blk reg;
};
defer self.register_manager.unfreezeRegs(&.{rhs_reg});
@@ -1077,6 +1087,8 @@ fn binOpImmediate(
if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+ const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1084,8 +1096,12 @@ fn binOpImmediate(
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
).?;
} else null;
+
const reg = try self.register_manager.allocReg(track_inst);
self.register_manager.freezeRegs(&.{reg});
+
+ if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
break :blk reg;
};
defer self.register_manager.unfreezeRegs(&.{lhs_reg});
@@ -3141,7 +3157,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .cset,
.data = .{ .r_cond = .{
.rd = reg,
- .cond = condition,
+ .cond = condition.negate(),
} },
});
},
diff --git a/test/stage2/aarch64.zig b/test/stage2/aarch64.zig
index 580a375aef..b16a29f56f 100644
--- a/test/stage2/aarch64.zig
+++ b/test/stage2/aarch64.zig
@@ -17,15 +17,8 @@ pub fn addCases(ctx: *TestContext) !void {
var case = ctx.exe("linux_aarch64 hello world", linux_aarch64);
// Regular old hello world
case.addCompareOutput(
- \\pub export fn _start() noreturn {
+ \\pub fn main() void {
\\ print();
- \\ exit();
- \\}
- \\
- \\fn doNothing() void {}
- \\
- \\fn answer() u64 {
- \\ return 0x1234abcd1234abcd;
\\}
\\
\\fn print() void {
@@ -38,16 +31,6 @@ pub fn addCases(ctx: *TestContext) !void {
\\ : "memory", "cc"
\\ );
\\}
- \\
- \\fn exit() noreturn {
- \\ asm volatile ("svc #0"
- \\ :
- \\ : [number] "{x8}" (93),
- \\ [arg1] "{x0}" (0)
- \\ : "memory", "cc"
- \\ );
- \\ unreachable;
- \\}
,
"Hello, World!\n",
);
@@ -129,6 +112,49 @@ pub fn addCases(ctx: *TestContext) !void {
);
}
+ {
+ var case = ctx.exe("large add function", linux_aarch64);
+
+ case.addCompareOutput(
+ \\pub fn main() void {
+ \\ assert(add(3, 4) == 791);
+ \\}
+ \\
+ \\fn add(a: u32, b: u32) u32 {
+ \\ const x: u32 = blk: {
+ \\ const c = a + b; // 7
+ \\ const d = a + c; // 10
+ \\ const e = d + b; // 14
+ \\ const f = d + e; // 24
+ \\ const g = e + f; // 38
+ \\ const h = f + g; // 62
+ \\ const i = g + h; // 100
+ \\ const j = i + d; // 110
+ \\ const k = i + j; // 210
+ \\ const l = k + c; // 217
+ \\ const m = l + d; // 227
+ \\ const n = m + e; // 241
+ \\ const o = n + f; // 265
+ \\ const p = o + g; // 303
+ \\ const q = p + h; // 365
+ \\ const r = q + i; // 465
+ \\ const s = r + j; // 575
+ \\ const t = s + k; // 785
+ \\ break :blk t;
+ \\ };
+ \\ const y = x + a; // 788
+ \\ const z = y + a; // 791
+ \\ return z;
+ \\}
+ \\
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ ,
+ "",
+ );
+ }
+
// macOS tests
{
var case = ctx.exe("hello world with updates", macos_aarch64);
From 98c71cc88a2d472dded057a4a6f1d610dad9e491 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sun, 13 Feb 2022 11:01:15 +0100
Subject: [PATCH 0197/2031] stage2 AArch64: Implement calling function pointers
---
src/arch/aarch64/CodeGen.zig | 164 ++++++++++-------------------------
1 file changed, 46 insertions(+), 118 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 8b5503f293..55c8c64794 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -2073,41 +2073,41 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
var info = try self.resolveCallingConventionValues(fn_ty);
defer info.deinit(self);
+ for (info.args) |mc_arg, arg_i| {
+ const arg = args[arg_i];
+ const arg_ty = self.air.typeOf(arg);
+ const arg_mcv = try self.resolveInst(args[arg_i]);
+
+ switch (mc_arg) {
+ .none => continue,
+ .undef => unreachable,
+ .immediate => unreachable,
+ .unreach => unreachable,
+ .dead => unreachable,
+ .embedded_in_code => unreachable,
+ .memory => unreachable,
+ .compare_flags_signed => unreachable,
+ .compare_flags_unsigned => unreachable,
+ .register => |reg| {
+ try self.register_manager.getReg(reg, null);
+ try self.genSetReg(arg_ty, reg, arg_mcv);
+ },
+ .stack_offset => {
+ return self.fail("TODO implement calling with parameters in memory", .{});
+ },
+ .ptr_stack_offset => {
+ return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+ },
+ .ptr_embedded_in_code => {
+ return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+ },
+ }
+ }
+
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
- for (info.args) |mc_arg, arg_i| {
- const arg = args[arg_i];
- const arg_ty = self.air.typeOf(arg);
- const arg_mcv = try self.resolveInst(args[arg_i]);
-
- switch (mc_arg) {
- .none => continue,
- .undef => unreachable,
- .immediate => unreachable,
- .unreach => unreachable,
- .dead => unreachable,
- .embedded_in_code => unreachable,
- .memory => unreachable,
- .compare_flags_signed => unreachable,
- .compare_flags_unsigned => unreachable,
- .register => |reg| {
- try self.register_manager.getReg(reg, null);
- try self.genSetReg(arg_ty, reg, arg_mcv);
- },
- .stack_offset => {
- return self.fail("TODO implement calling with parameters in memory", .{});
- },
- .ptr_stack_offset => {
- return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
- },
- .ptr_embedded_in_code => {
- return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
- },
- }
- }
-
- if (self.air.value(callee)) |func_value| {
+ if (self.air.value(callee)) |func_value| {
+ if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@@ -2131,52 +2131,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
- } else {
- assert(ty.zigTypeTag() == .Pointer);
- const mcv = try self.resolveInst(callee);
- try self.genSetReg(Type.initTag(.usize), .x30, mcv);
-
- _ = try self.addInst(.{
- .tag = .blr,
- .data = .{ .reg = .x30 },
- });
- }
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- for (info.args) |mc_arg, arg_i| {
- const arg = args[arg_i];
- const arg_ty = self.air.typeOf(arg);
- const arg_mcv = try self.resolveInst(args[arg_i]);
- // Here we do not use setRegOrMem even though the logic is similar, because
- // the function call will move the stack pointer, so the offsets are different.
- switch (mc_arg) {
- .none => continue,
- .register => |reg| {
- try self.register_manager.getReg(reg, null);
- try self.genSetReg(arg_ty, reg, arg_mcv);
- },
- .stack_offset => {
- // Here we need to emit instructions like this:
- // mov qword ptr [rsp + stack_offset], x
- return self.fail("TODO implement calling with parameters in memory", .{});
- },
- .ptr_stack_offset => {
- return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
- },
- .ptr_embedded_in_code => {
- return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
- },
- .undef => unreachable,
- .immediate => unreachable,
- .unreach => unreachable,
- .dead => unreachable,
- .embedded_in_code => unreachable,
- .memory => unreachable,
- .compare_flags_signed => unreachable,
- .compare_flags_unsigned => unreachable,
- }
- }
-
- if (self.air.value(callee)) |func_value| {
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
// TODO I'm hacking my way through here by repurposing .memory for storing
@@ -2212,41 +2167,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
- } else {
- return self.fail("TODO implement calling runtime known function pointer", .{});
- }
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- for (info.args) |mc_arg, arg_i| {
- const arg = args[arg_i];
- const arg_ty = self.air.typeOf(arg);
- const arg_mcv = try self.resolveInst(args[arg_i]);
-
- switch (mc_arg) {
- .none => continue,
- .undef => unreachable,
- .immediate => unreachable,
- .unreach => unreachable,
- .dead => unreachable,
- .embedded_in_code => unreachable,
- .memory => unreachable,
- .compare_flags_signed => unreachable,
- .compare_flags_unsigned => unreachable,
- .register => |reg| {
- try self.register_manager.getReg(reg, null);
- try self.genSetReg(arg_ty, reg, arg_mcv);
- },
- .stack_offset => {
- return self.fail("TODO implement calling with parameters in memory", .{});
- },
- .ptr_stack_offset => {
- return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
- },
- .ptr_embedded_in_code => {
- return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
- },
- }
- }
- if (self.air.value(callee)) |func_value| {
+ } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
if (func_value.castTag(.function)) |func_payload| {
try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@@ -2266,10 +2187,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
- } else {
- return self.fail("TODO implement calling runtime known function pointer", .{});
- }
- } else unreachable;
+ } else unreachable;
+ } else {
+ assert(ty.zigTypeTag() == .Pointer);
+ const mcv = try self.resolveInst(callee);
+ try self.genSetReg(ty, .x30, mcv);
+
+ _ = try self.addInst(.{
+ .tag = .blr,
+ .data = .{ .reg = .x30 },
+ });
+ }
const result: MCValue = result: {
switch (info.return_value) {
From 22895f5616c663bb7b8ad9866c29d00bc4bc315c Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Mon, 14 Feb 2022 22:33:01 +0100
Subject: [PATCH 0198/2031] stage2 AArch64: Enable behavior testing
---
src/arch/aarch64/CodeGen.zig | 34 +++++++++++++++++++++++++++----
test/behavior.zig | 2 +-
test/behavior/align.zig | 18 +++++++++++++++++
test/behavior/alignof.zig | 1 +
test/behavior/array.zig | 27 +++++++++++++++++++++++++
test/behavior/basic.zig | 32 +++++++++++++++++++++++++++++
test/behavior/bit_shifting.zig | 1 +
test/behavior/bugs/1381.zig | 1 +
test/behavior/bugs/1486.zig | 2 ++
test/behavior/bugs/1735.zig | 1 +
test/behavior/bugs/1741.zig | 1 +
test/behavior/bugs/2006.zig | 1 +
test/behavior/bugs/2578.zig | 1 +
test/behavior/bugs/3007.zig | 1 +
test/behavior/bugs/3112.zig | 1 +
test/behavior/bugs/3367.zig | 1 +
test/behavior/bugs/394.zig | 1 +
test/behavior/bugs/656.zig | 1 +
test/behavior/bugs/7250.zig | 1 +
test/behavior/cast.zig | 36 +++++++++++++++++++++++++++++++++
test/behavior/fn_delegation.zig | 1 +
test/behavior/ir_block_deps.zig | 1 +
test/behavior/optional.zig | 10 +++++++++
test/behavior/reflection.zig | 1 +
test/behavior/slice.zig | 14 +++++++++++++
test/behavior/struct.zig | 28 +++++++++++++++++++++++++
test/behavior/truncate.zig | 9 +++++++++
test/behavior/var_args.zig | 3 +++
28 files changed, 226 insertions(+), 5 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 55c8c64794..b9d5a29f18 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1029,14 +1029,16 @@ fn binOpRegister(
if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
const mir_tag: Mir.Inst.Tag = switch (tag) {
- .add => .add_shifted_register,
- .sub => .sub_shifted_register,
+ .add, .ptr_add => .add_shifted_register,
+ .sub, .ptr_sub => .sub_shifted_register,
.xor => .eor_shifted_register,
else => unreachable,
};
const mir_data: Mir.Inst.Data = switch (tag) {
.add,
.sub,
+ .ptr_add,
+ .ptr_sub,
=> .{ .rrr_imm6_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
@@ -1225,7 +1227,24 @@ fn binOp(
},
.ptr_add,
.ptr_sub,
- => return self.fail("TODO ptr_add, ptr_sub", .{}),
+ => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Pointer => {
+ const ptr_ty = lhs_ty;
+ const pointee_ty = switch (ptr_ty.ptrSize()) {
+ .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
+ else => ptr_ty.childType(),
+ };
+
+ if (pointee_ty.abiSize(self.target.*) > 1) {
+ return self.fail("TODO ptr_add, ptr_sub with more element sizes", .{});
+ }
+
+ return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+ },
+ else => unreachable,
+ }
+ },
else => unreachable,
}
}
@@ -1439,7 +1458,14 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_ty = self.air.getRefType(ty_op.ty);
+ const payload_ty = error_union_ty.errorUnionPayload();
+ const mcv = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasRuntimeBits()) break :result mcv;
+
+ return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
diff --git a/test/behavior.zig b/test/behavior.zig
index db6863a8b0..abfd8fb0bf 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -54,7 +54,7 @@ test {
_ = @import("behavior/decltest.zig");
}
- if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
+ if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_aarch64) {
// Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bugs/624.zig");
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 96278524c0..a8d8fcd206 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -27,6 +27,7 @@ test "default alignment allows unspecified in type syntax" {
}
test "implicitly decreasing pointer alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const a: u32 align(4) = 3;
const b: u32 align(8) = 4;
try expect(addUnaligned(&a, &b) == 7);
@@ -37,6 +38,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
}
test "@alignCast pointers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var x: u32 align(4) = 1;
expectsOnly1(&x);
try expect(x == 2);
@@ -102,6 +104,7 @@ fn fnWithAlignedStack() i32 {
}
test "implicitly decreasing slice alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a: u32 align(4) = 3;
@@ -113,6 +116,7 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
}
test "specifying alignment allows pointer cast" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testBytesAlign(0x33);
@@ -124,6 +128,7 @@ fn testBytesAlign(b: u8) !void {
}
test "@alignCast slices" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array align(4) = [_]u32{ 1, 1 };
@@ -139,6 +144,7 @@ fn sliceExpects4(slice: []align(4) u32) void {
}
test "return error union with 128-bit integer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(3 == try give());
@@ -148,6 +154,7 @@ fn give() anyerror!u128 {
}
test "page aligned array on stack" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -173,6 +180,7 @@ fn noop1() align(1) void {}
fn noop4() align(4) void {}
test "function alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -189,6 +197,7 @@ test "function alignment" {
}
test "implicitly decreasing fn alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@@ -216,6 +225,7 @@ fn alignedBig() align(16) i32 {
}
test "@alignCast functions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@@ -239,6 +249,7 @@ fn simple4() align(4) i32 {
}
test "generic function with align param" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@@ -260,6 +271,7 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
}
test "runtime known array index has best alignment possible" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -302,6 +314,7 @@ fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void {
}
test "alignment of function with c calling convention" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var runtime_nothing = ¬hing;
@@ -318,6 +331,7 @@ const DefaultAligned = struct {
};
test "read 128-bit field from default aligned struct in stack memory" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -337,6 +351,7 @@ var default_aligned_global = DefaultAligned{
};
test "read 128-bit field from default aligned struct in global memory" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@@ -348,6 +363,7 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@@ -369,6 +385,7 @@ test "struct field explicit alignment" {
}
test "align(@alignOf(T)) T does not force resolution of T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@@ -397,6 +414,7 @@ test "align(@alignOf(T)) T does not force resolution of T" {
}
test "align(N) on functions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig
index 749855db52..5a49146694 100644
--- a/test/behavior/alignof.zig
+++ b/test/behavior/alignof.zig
@@ -11,6 +11,7 @@ const Foo = struct {
};
test "@alignOf(T) before referencing T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index 23820e71b5..e93f0f3e90 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -6,6 +6,7 @@ const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "array to slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a: u32 align(4) = 3;
@@ -20,6 +21,7 @@ test "array to slice" {
}
test "arrays" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array: [5]u32 = undefined;
@@ -46,6 +48,7 @@ fn getArrayLen(a: []const u32) usize {
}
test "array init with mult" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a = 'a';
@@ -57,6 +60,7 @@ test "array init with mult" {
}
test "array literal with explicit type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 };
@@ -86,6 +90,7 @@ const ArrayDotLenConstExpr = struct {
const some_array = [_]u8{ 0, 1, 2, 3 };
test "array literal with specified size" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array = [2]u8{ 1, 2 };
@@ -94,6 +99,7 @@ test "array literal with specified size" {
}
test "array len field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var arr = [4]u8{ 0, 0, 0, 0 };
@@ -105,6 +111,7 @@ test "array len field" {
}
test "array with sentinels" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -134,6 +141,7 @@ test "array with sentinels" {
}
test "void arrays" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array: [4]void = undefined;
@@ -144,6 +152,7 @@ test "void arrays" {
}
test "nested arrays" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" };
@@ -157,6 +166,7 @@ test "nested arrays" {
}
test "implicit comptime in array type size" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var arr: [plusOne(10)]bool = undefined;
@@ -168,6 +178,7 @@ fn plusOne(x: u32) u32 {
}
test "single-item pointer to array indexing and slicing" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testSingleItemPtrArrayIndexSlice();
@@ -193,6 +204,7 @@ fn doSomeMangling(array: *[4]u8) void {
}
test "implicit cast zero sized array ptr to slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
{
@@ -208,6 +220,7 @@ test "implicit cast zero sized array ptr to slice" {
}
test "anonymous list literal syntax" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -227,6 +240,7 @@ var s_array: [8]Sub = undefined;
const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -243,6 +257,7 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -264,6 +279,7 @@ test "read/write through global variable array of struct fields initialized via
}
test "implicit cast single-item pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -284,6 +300,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -296,6 +313,7 @@ test "comptime evaluating function that takes array by value" {
}
test "runtime initialize array elem and then implicit cast to slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -306,6 +324,7 @@ test "runtime initialize array elem and then implicit cast to slice" {
}
test "array literal as argument to function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -334,6 +353,7 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -395,6 +415,7 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -420,6 +441,7 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -437,6 +459,7 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -455,6 +478,7 @@ test "type deduction for array subscript expression" {
}
test "sentinel element count towards the ABI size calculation" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -481,6 +505,7 @@ test "sentinel element count towards the ABI size calculation" {
}
test "zero-sized array with recursive type definition" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -505,6 +530,7 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -540,6 +566,7 @@ test "type coercion of anon struct literal to array" {
}
test "type coercion of pointer to anon struct literal to pointer to array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 18a24f9b3a..0c2c293d23 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -15,6 +15,7 @@ test "empty function with comments" {
}
test "truncate" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(testTruncate(0x10fd) == 0xfd);
@@ -25,6 +26,7 @@ fn testTruncate(x: u32) u8 {
}
test "truncate to non-power-of-two integers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testTrunc(u32, u1, 0b10101, 0b1);
@@ -46,6 +48,7 @@ const g1: i32 = 1233 + 1;
var g2: i32 = 0;
test "global variables" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(g2 == 0);
g2 = g1;
try expect(g2 == 1234);
@@ -112,6 +115,7 @@ fn first4KeysOfHomeRow() []const u8 {
}
test "return string from function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -119,12 +123,14 @@ test "return string from function" {
}
test "hex escape" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello"));
}
test "multiline string" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@@ -137,6 +143,7 @@ test "multiline string" {
}
test "multiline string comments at start" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@@ -149,6 +156,7 @@ test "multiline string comments at start" {
}
test "multiline string comments at end" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@@ -161,6 +169,7 @@ test "multiline string comments at end" {
}
test "multiline string comments in middle" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@@ -173,6 +182,7 @@ test "multiline string comments in middle" {
}
test "multiline string comments at multiple places" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@@ -191,6 +201,7 @@ test "string concatenation" {
}
test "array mult operator" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
@@ -216,6 +227,7 @@ test "compile time global reinterpret" {
}
test "cast undefined" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const array: [100]u8 = undefined;
@@ -227,6 +239,7 @@ fn testCastUndefined(x: []const u8) void {
}
test "implicit cast after unreachable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(outer() == 1234);
@@ -284,6 +297,7 @@ fn fB() []const u8 {
}
test "call function pointer in struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -310,6 +324,7 @@ const FnPtrWrapper = struct {
};
test "const ptr from var variable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: u64 = undefined;
@@ -326,6 +341,7 @@ fn copy(src: *const u64, dst: *u64) void {
}
test "call result of if else expression" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -339,6 +355,7 @@ fn f2(x: bool) []const u8 {
}
test "memcpy and memset intrinsics" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -361,6 +378,7 @@ fn testMemcpyMemset() !void {
}
test "variable is allowed to be a pointer to an opaque type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -374,6 +392,7 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
}
test "take address of parameter" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -400,6 +419,7 @@ fn testPointerToVoidReturnType2() *const void {
}
test "array 2D const double ptr" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -419,6 +439,7 @@ fn testArray2DConstDoublePtr(ptr: *const f32) !void {
}
test "double implicit cast in same expression" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -430,6 +451,7 @@ fn nine() u8 {
}
test "struct inside function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testStructInFn();
@@ -451,6 +473,7 @@ fn testStructInFn() !void {
}
test "fn call returning scalar optional in equality expression" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(getNull() == null);
}
@@ -459,6 +482,7 @@ fn getNull() ?*i32 {
}
test "global variable assignment with optional unwrapping with var initialized to undefined" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -476,6 +500,7 @@ test "global variable assignment with optional unwrapping with var initialized t
var global_foo: *i32 = undefined;
test "peer result location with typed parent, runtime condition, comptime prongs" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -550,6 +575,7 @@ test "comptime cast fn to ptr" {
}
test "equality compare fn ptrs" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var a = &emptyFn;
@@ -557,6 +583,7 @@ test "equality compare fn ptrs" {
}
test "self reference through fn ptr field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@@ -576,6 +603,7 @@ test "self reference through fn ptr field" {
}
test "global variable initialized to global variable array element" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -593,6 +621,7 @@ var gdt = [_]GDTEntry{
var global_ptr = &gdt[0];
test "global constant is loaded with a runtime-known index" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -610,6 +639,7 @@ test "global constant is loaded with a runtime-known index" {
}
test "multiline string literal is null terminated" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -643,6 +673,7 @@ test "explicit cast optional pointers" {
}
test "pointer comparison" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -655,6 +686,7 @@ fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
}
test "string concatenation" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig
index c0b2729bdf..1a01cbd732 100644
--- a/test/behavior/bit_shifting.zig
+++ b/test/behavior/bit_shifting.zig
@@ -61,6 +61,7 @@ fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, compt
}
test "sharded table" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/bugs/1381.zig b/test/behavior/bugs/1381.zig
index 91a253af24..2f05d2fa96 100644
--- a/test/behavior/bugs/1381.zig
+++ b/test/behavior/bugs/1381.zig
@@ -12,6 +12,7 @@ const A = union(enum) {
};
test "union that needs padding bytes inside an array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var as = [_]A{
diff --git a/test/behavior/bugs/1486.zig b/test/behavior/bugs/1486.zig
index 8f954a3600..91d5b621d2 100644
--- a/test/behavior/bugs/1486.zig
+++ b/test/behavior/bugs/1486.zig
@@ -1,10 +1,12 @@
const std = @import("std");
const expect = std.testing.expect;
+const builtin = @import("builtin");
const ptr = &global;
var global: usize = 123;
test "constant pointer to global variable causes runtime load" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
global = 1234;
try expect(&global == ptr);
try expect(ptr.* == 1234);
diff --git a/test/behavior/bugs/1735.zig b/test/behavior/bugs/1735.zig
index c07bd9472b..556b899de1 100644
--- a/test/behavior/bugs/1735.zig
+++ b/test/behavior/bugs/1735.zig
@@ -42,6 +42,7 @@ const a = struct {
};
test "initialization" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/bugs/1741.zig b/test/behavior/bugs/1741.zig
index 280aafc52e..f4cc2101c4 100644
--- a/test/behavior/bugs/1741.zig
+++ b/test/behavior/bugs/1741.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
test "fixed" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const x: f32 align(128) = 12.34;
diff --git a/test/behavior/bugs/2006.zig b/test/behavior/bugs/2006.zig
index 4d76230c88..fcacb9a2c6 100644
--- a/test/behavior/bugs/2006.zig
+++ b/test/behavior/bugs/2006.zig
@@ -6,6 +6,7 @@ const S = struct {
p: *S,
};
test "bug 2006" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
var a: S = undefined;
a = S{ .p = undefined };
diff --git a/test/behavior/bugs/2578.zig b/test/behavior/bugs/2578.zig
index 15f5bf0e53..90db296158 100644
--- a/test/behavior/bugs/2578.zig
+++ b/test/behavior/bugs/2578.zig
@@ -12,6 +12,7 @@ fn bar(pointer: ?*anyopaque) void {
}
test "fixed" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/3007.zig b/test/behavior/bugs/3007.zig
index 0b3cbdc56d..c93bbf8d20 100644
--- a/test/behavior/bugs/3007.zig
+++ b/test/behavior/bugs/3007.zig
@@ -19,6 +19,7 @@ fn get_foo() Foo.FooError!*Foo {
}
test "fixed" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/3112.zig b/test/behavior/bugs/3112.zig
index 089f3e59f6..ebd8fd1ef3 100644
--- a/test/behavior/bugs/3112.zig
+++ b/test/behavior/bugs/3112.zig
@@ -12,6 +12,7 @@ fn prev(p: ?State) void {
}
test "zig test crash" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
diff --git a/test/behavior/bugs/3367.zig b/test/behavior/bugs/3367.zig
index f540fdf6df..6468498ab6 100644
--- a/test/behavior/bugs/3367.zig
+++ b/test/behavior/bugs/3367.zig
@@ -10,6 +10,7 @@ const Mixin = struct {
};
test "container member access usingnamespace decls" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo = Foo{};
diff --git a/test/behavior/bugs/394.zig b/test/behavior/bugs/394.zig
index ec1bd5cc9f..28934c8dd0 100644
--- a/test/behavior/bugs/394.zig
+++ b/test/behavior/bugs/394.zig
@@ -11,6 +11,7 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "bug 394 fixed" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const x = S{
diff --git a/test/behavior/bugs/656.zig b/test/behavior/bugs/656.zig
index bd93c2b88c..d71dc426f9 100644
--- a/test/behavior/bugs/656.zig
+++ b/test/behavior/bugs/656.zig
@@ -11,6 +11,7 @@ const Value = struct {
};
test "optional if after an if in a switch prong of a switch with 2 prongs in an else" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try foo(false, true);
diff --git a/test/behavior/bugs/7250.zig b/test/behavior/bugs/7250.zig
index 27810acea4..ee04847e51 100644
--- a/test/behavior/bugs/7250.zig
+++ b/test/behavior/bugs/7250.zig
@@ -14,6 +14,7 @@ threadlocal var g_uart0 = nrfx_uart_t{
};
test "reference a global threadlocal variable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 4028d8c5f1..85e3368441 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -18,6 +18,7 @@ test "integer literal to pointer cast" {
}
test "peer type resolution: ?T and T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(peerTypeTAndOptionalT(true, false).? == 0);
@@ -94,6 +95,7 @@ test "comptime_int @intToFloat" {
}
test "@floatToInt" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -116,6 +118,7 @@ fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) !void {
}
test "implicitly cast indirect pointer to maybe-indirect pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -174,6 +177,7 @@ test "@floatCast comptime_int and comptime_float" {
}
test "coerce undefined to optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(MakeType(void).getNull() == null);
@@ -193,6 +197,7 @@ fn MakeType(comptime T: type) type {
}
test "implicit cast from *[N]T to [*c]T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: [4]u16 = [4]u16{ 0, 1, 2, 3 };
@@ -205,6 +210,7 @@ test "implicit cast from *[N]T to [*c]T" {
}
test "*usize to *void" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var i = @as(usize, 0);
var v = @ptrCast(*void, &i);
v.* = {};
@@ -230,6 +236,7 @@ test "@intCast to u0 and use the result" {
}
test "peer result null and comptime_int" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -253,6 +260,7 @@ test "peer result null and comptime_int" {
}
test "*const ?[*]const T to [*c]const [*c]const T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array = [_]u8{ 'o', 'k' };
@@ -264,6 +272,7 @@ test "*const ?[*]const T to [*c]const [*c]const T" {
}
test "array coersion to undefined at runtime" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@setRuntimeSafety(true);
@@ -293,6 +302,7 @@ fn implicitIntLitToOptional() void {
}
test "return u8 coercing into ?u32 return type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -313,6 +323,7 @@ test "cast from ?[*]T to ??[*]T" {
}
test "peer type unsigned int to signed" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -325,6 +336,7 @@ test "peer type unsigned int to signed" {
}
test "expected [*c]const u8, found [*:0]const u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -384,6 +396,7 @@ fn castToOptionalTypeError(z: i32) !void {
}
test "implicitly cast from [0]T to anyerror![]T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -455,6 +468,7 @@ fn testCastConstArrayRefToConstSlice() !void {
}
test "peer type resolution: error and [N]T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -689,6 +703,7 @@ test "type coercion related to sentinel-termination" {
}
test "peer type resolution implicit cast to return type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -710,6 +725,7 @@ test "peer type resolution implicit cast to return type" {
}
test "peer type resolution implicit cast to variable type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -806,6 +822,7 @@ test "comptime float casts" {
}
test "pointer reinterpret const float to int" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -822,6 +839,7 @@ test "pointer reinterpret const float to int" {
}
test "implicit cast from [*]T to ?*anyopaque" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -840,6 +858,7 @@ fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
}
test "compile time int to ptr of function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -857,6 +876,7 @@ fn foobar(func: PFN_void) !void {
}
test "implicit ptr to *anyopaque" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -871,6 +891,7 @@ test "implicit ptr to *anyopaque" {
}
test "return null from fn() anyerror!?&T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -887,6 +908,7 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
}
test "peer type resolution: [0]u8 and []const u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -907,6 +929,7 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -920,6 +943,7 @@ fn castToOptionalSlice() ?[]const u8 {
}
test "cast u128 to f128 and back" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -941,6 +965,7 @@ fn cast128Float(x: u128) f128 {
}
test "implicit cast from *[N]T to ?[*]T" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -956,6 +981,7 @@ test "implicit cast from *[N]T to ?[*]T" {
}
test "implicit cast from *T to ?*anyopaque" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -970,6 +996,7 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
}
test "implicit cast *[0]T to E![]const u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -987,6 +1014,7 @@ test "cast from array reference to fn: comptime fn ptr" {
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
test "cast from array reference to fn: runtime fn ptr" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -996,6 +1024,7 @@ test "cast from array reference to fn: runtime fn ptr" {
}
test "*const [N]null u8 to ?[]const u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1034,6 +1063,7 @@ test "cast between [*c]T and ?[*:0]T on fn parameter" {
var global_struct: struct { f0: usize } = undefined;
test "assignment to optional pointer result loc" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1043,6 +1073,7 @@ test "assignment to optional pointer result loc" {
}
test "cast between *[N]void and []void" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1052,6 +1083,7 @@ test "cast between *[N]void and []void" {
}
test "peer resolve arrays of different size to const slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1065,6 +1097,7 @@ fn boolToStr(b: bool) []const u8 {
}
test "cast f16 to wider types" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1083,6 +1116,7 @@ test "cast f16 to wider types" {
}
test "cast f128 to narrower types" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1101,6 +1135,7 @@ test "cast f128 to narrower types" {
}
test "peer type resolution: unreachable, null, slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1119,6 +1154,7 @@ test "peer type resolution: unreachable, null, slice" {
}
test "cast i8 fn call peers to i32 result" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig
index 25ec3dea1b..eee8f52490 100644
--- a/test/behavior/fn_delegation.zig
+++ b/test/behavior/fn_delegation.zig
@@ -32,6 +32,7 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 {
}
test "fn delegation" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const foo = Foo{};
diff --git a/test/behavior/ir_block_deps.zig b/test/behavior/ir_block_deps.zig
index cbc5cc2419..d7d50b4be1 100644
--- a/test/behavior/ir_block_deps.zig
+++ b/test/behavior/ir_block_deps.zig
@@ -18,6 +18,7 @@ fn getErrInt() anyerror!i32 {
}
test "ir block deps" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 3caf777195..78788d6556 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -5,6 +5,7 @@ const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "passing an optional integer as a parameter" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -25,6 +26,7 @@ test "passing an optional integer as a parameter" {
pub const EmptyStruct = struct {};
test "optional pointer to size zero struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -34,6 +36,7 @@ test "optional pointer to size zero struct" {
}
test "equality compare optional pointers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -58,6 +61,7 @@ fn testNullPtrsEql() !void {
}
test "optional with void type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -69,6 +73,7 @@ test "optional with void type" {
}
test "address of unwrap optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -89,6 +94,7 @@ test "address of unwrap optional" {
}
test "nested optional field in struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -105,6 +111,7 @@ test "nested optional field in struct" {
}
test "equality compare optional with non-optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -142,6 +149,7 @@ fn test_cmp_optional_non_optional() !void {
}
test "unwrap function call with optional pointer return value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -163,6 +171,7 @@ test "unwrap function call with optional pointer return value" {
}
test "nested orelse" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -189,6 +198,7 @@ test "nested orelse" {
}
test "self-referential struct through a slice of optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index 96c81fe0d0..a181e95b86 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -28,6 +28,7 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
}
test "reflection: @field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index badaf7ef03..4b73a3a140 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -27,6 +27,7 @@ comptime {
}
test "slicing" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -68,6 +69,7 @@ test "comptime slice of undefined pointer of length 0" {
}
test "implicitly cast array of size 0 to slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -80,6 +82,7 @@ fn assertLenIsZero(msg: []const u8) !void {
}
test "access len index of sentinel-terminated slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
@@ -129,6 +132,7 @@ test "slice of type" {
}
test "generic malloc free" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -187,6 +191,7 @@ test "comptime pointer cast array and then slice" {
}
test "slicing zero length array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -202,6 +207,7 @@ test "slicing zero length array" {
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -215,6 +221,7 @@ test "compile time slice of pointer to hard coded address" {
}
test "slice string literal has correct type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -230,6 +237,7 @@ test "slice string literal has correct type" {
}
test "result location zero sized array inside struct field implicit cast to slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const E = struct {
@@ -240,6 +248,7 @@ test "result location zero sized array inside struct field implicit cast to slic
}
test "runtime safety lets us slice from len..len" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -252,6 +261,7 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "C pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -262,6 +272,7 @@ test "C pointer" {
}
test "C pointer slice access" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -291,6 +302,7 @@ fn sliceSum(comptime q: []const u8) i32 {
}
test "slice type with custom alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -305,6 +317,7 @@ test "slice type with custom alignment" {
}
test "obtaining a null terminated slice" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -350,6 +363,7 @@ test "empty array to slice" {
}
test "@ptrCast slice to pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index ecdd6a1846..8428ea886f 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -9,6 +9,7 @@ const maxInt = std.math.maxInt;
top_level_field: i32,
test "top level fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var instance = @This(){
@@ -42,6 +43,7 @@ const StructWithFields = struct {
};
test "non-packed struct has fields padded out to the required alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const foo = StructWithFields{ .a = 5, .b = 1, .c = 10, .d = 2 };
@@ -65,6 +67,7 @@ const SmallStruct = struct {
};
test "lower unnamed constants" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var foo = SmallStruct{ .a = 1, .b = 255 };
try expect(foo.first() == 1);
try expect(foo.second() == 255);
@@ -83,6 +86,7 @@ const StructFoo = struct {
};
test "structs" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo: StructFoo = undefined;
@@ -101,6 +105,7 @@ fn testMutation(foo: *StructFoo) void {
}
test "struct byval assign" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo1: StructFoo = undefined;
@@ -134,6 +139,7 @@ fn returnEmptyStructInstance() StructWithNoFields {
}
test "fn call of struct field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Foo = struct {
@@ -165,12 +171,14 @@ const MemberFnTestFoo = struct {
};
test "call member function directly" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const result = MemberFnTestFoo.member(instance);
try expect(result == 1234);
}
test "store member function in variable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const memberFn = MemberFnTestFoo.member;
const result = memberFn(instance);
@@ -178,6 +186,7 @@ test "store member function in variable" {
}
test "member functions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const r = MemberFnRand{ .seed = 1234 };
try expect(r.getSeed() == 1234);
}
@@ -189,6 +198,7 @@ const MemberFnRand = struct {
};
test "return struct byval from function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const bar = makeBar2(1234, 5678);
@@ -206,6 +216,7 @@ fn makeBar2(x: i32, y: i32) Bar {
}
test "call method with mutable reference to struct with no fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -238,6 +249,7 @@ test "usingnamespace within struct scope" {
}
test "struct field init with catch" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@@ -296,6 +308,7 @@ const Val = struct {
};
test "struct point to self" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -314,6 +327,7 @@ test "struct point to self" {
}
test "void struct fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -334,6 +348,7 @@ const VoidStructFieldsFoo = struct {
};
test "return empty struct from fn" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -347,6 +362,7 @@ fn testReturnEmptyStructFromFn() EmptyStruct2 {
}
test "pass slice of empty struct to fn" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -359,6 +375,7 @@ fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
}
test "self-referencing struct via array member" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -389,6 +406,7 @@ const EmptyStruct = struct {
};
test "align 1 field before self referential align 8 field as slice return type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -413,6 +431,7 @@ const APackedStruct = packed struct {
};
test "packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -438,6 +457,7 @@ const Foo96Bits = packed struct {
};
test "packed struct 24bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -484,6 +504,7 @@ test "packed struct 24bits" {
}
test "runtime struct initialization of bitfield" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -523,6 +544,7 @@ const Bitfields = packed struct {
};
test "native bit field understands endianness" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -546,6 +568,7 @@ test "native bit field understands endianness" {
}
test "implicit cast packed struct field to const ptr" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -581,6 +604,7 @@ test "zero-bit field in packed struct" {
}
test "packed struct with non-ABI-aligned field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -610,6 +634,7 @@ const bit_field_1 = BitField1{
};
test "bit field access" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -642,6 +667,7 @@ fn getC(data: *const BitField1) u2 {
}
test "default struct initialization fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -907,6 +933,7 @@ test "packed struct field passed to generic function" {
}
test "anonymous struct literal syntax" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1100,6 +1127,7 @@ test "type coercion of pointer to anon struct literal to pointer to struct" {
}
test "packed struct with undefined initializers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index 001ba538b2..7fe5b8ecb6 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "truncate u0 to larger integer allowed and has comptime known result" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: u0 = 0;
@@ -11,6 +12,7 @@ test "truncate u0 to larger integer allowed and has comptime known result" {
}
test "truncate.u0.literal" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var z = @truncate(u0, 0);
@@ -18,6 +20,7 @@ test "truncate.u0.literal" {
}
test "truncate.u0.const" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const c0: usize = 0;
@@ -26,6 +29,7 @@ test "truncate.u0.const" {
}
test "truncate.u0.var" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var d: u8 = 2;
@@ -34,6 +38,7 @@ test "truncate.u0.var" {
}
test "truncate i0 to larger integer allowed and has comptime known result" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: i0 = 0;
@@ -42,6 +47,7 @@ test "truncate i0 to larger integer allowed and has comptime known result" {
}
test "truncate.i0.literal" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var z = @truncate(i0, 0);
@@ -49,6 +55,7 @@ test "truncate.i0.literal" {
}
test "truncate.i0.const" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const c0: isize = 0;
@@ -57,6 +64,7 @@ test "truncate.i0.const" {
}
test "truncate.i0.var" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var d: i8 = 2;
@@ -65,6 +73,7 @@ test "truncate.i0.var" {
}
test "truncate on comptime integer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x = @truncate(u16, 9999);
diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig
index 63b8c35e1b..0e37c845b6 100644
--- a/test/behavior/var_args.zig
+++ b/test/behavior/var_args.zig
@@ -25,6 +25,7 @@ fn readFirstVarArg(args: anytype) void {
}
test "send void arg to var args" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -84,6 +85,7 @@ fn foo2(args: anytype) bool {
}
test "array of var args functions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -93,6 +95,7 @@ test "array of var args functions" {
}
test "pass zero length array to var args param" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 6c195db03a502074952181261807cf9a8e140d8d Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Mon, 14 Feb 2022 22:36:13 +0100
Subject: [PATCH 0199/2031] ci: add aarch64-linux behavior tests
---
ci/zinc/linux_test.sh | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/ci/zinc/linux_test.sh b/ci/zinc/linux_test.sh
index 8f3eaacc7e..453269029d 100755
--- a/ci/zinc/linux_test.sh
+++ b/ci/zinc/linux_test.sh
@@ -7,8 +7,9 @@ ZIG=$DEBUG_STAGING/bin/zig
$ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM
$ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM -target aarch64-linux --test-cmd qemu-aarch64 --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -ofmt=c
-$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin
-$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin
+$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin
+$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin
+$ZIG test test/behavior.zig -fno-stage1 -I test -target aarch64-linux --test-cmd qemu-aarch64 --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test
$ZIG build test-behavior -fqemu -fwasmtime
From 807edd2234b016cd5470c51ac2bac451554c614d Mon Sep 17 00:00:00 2001
From: John Schmidt <3405586+schmee@users.noreply.github.com>
Date: Tue, 15 Feb 2022 03:52:12 +0100
Subject: [PATCH 0200/2031] LLVM backend: refactor LLVM bitcount ops (#10882)
Use `llvm.getIntrinsic` instead of `llvm.getNamedFunction`
---
src/codegen/llvm.zig | 56 +++++++++++---------------------------------
1 file changed, 14 insertions(+), 42 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 493b895d5d..3d65829c1e 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2203,8 +2203,8 @@ pub const FuncGen = struct {
.memcpy => try self.airMemcpy(inst),
.set_union_tag => try self.airSetUnionTag(inst),
.get_union_tag => try self.airGetUnionTag(inst),
- .clz => try self.airClzCtz(inst, "ctlz"),
- .ctz => try self.airClzCtz(inst, "cttz"),
+ .clz => try self.airClzCtz(inst, "llvm.ctlz"),
+ .ctz => try self.airClzCtz(inst, "llvm.cttz"),
.popcount => try self.airPopCount(inst),
.tag_name => try self.airTagName(inst),
.error_name => try self.airErrorName(inst),
@@ -4320,40 +4320,24 @@ pub const FuncGen = struct {
return self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, "");
}
- fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, prefix: [*:0]const u8) !?*const llvm.Value {
+ fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
- const target = self.dg.module.getTarget();
- const bits = operand_ty.intInfo(target).bits;
- const vec_len: ?u32 = switch (operand_ty.zigTypeTag()) {
- .Vector => operand_ty.vectorLen(),
- else => null,
- };
- var fn_name_buf: [100]u8 = undefined;
- const llvm_fn_name = if (vec_len) |len|
- std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.v{d}i{d}", .{
- prefix, len, bits,
- }) catch unreachable
- else
- std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.i{d}", .{
- prefix, bits,
- }) catch unreachable;
const llvm_i1 = self.context.intType(1);
- const fn_val = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
- const param_types = [_]*const llvm.Type{ operand_llvm_ty, llvm_i1 };
- const fn_type = llvm.functionType(operand_llvm_ty, ¶m_types, param_types.len, .False);
- break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
- };
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() };
const wrong_size_result = self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, "");
const result_ty = self.air.typeOfIndex(inst);
const result_llvm_ty = try self.dg.llvmType(result_ty);
+
+ const target = self.dg.module.getTarget();
+ const bits = operand_ty.intInfo(target).bits;
const result_bits = result_ty.intInfo(target).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
@@ -4370,29 +4354,17 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
- const target = self.dg.module.getTarget();
- const bits = operand_ty.intInfo(target).bits;
- const vec_len: ?u32 = switch (operand_ty.zigTypeTag()) {
- .Vector => operand_ty.vectorLen(),
- else => null,
- };
-
- var fn_name_buf: [100]u8 = undefined;
- const llvm_fn_name = if (vec_len) |len|
- std.fmt.bufPrintZ(&fn_name_buf, "llvm.ctpop.v{d}i{d}", .{ len, bits }) catch unreachable
- else
- std.fmt.bufPrintZ(&fn_name_buf, "llvm.ctpop.i{d}", .{bits}) catch unreachable;
- const fn_val = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
- const param_types = [_]*const llvm.Type{operand_llvm_ty};
- const fn_type = llvm.functionType(operand_llvm_ty, ¶m_types, param_types.len, .False);
- break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
- };
const params = [_]*const llvm.Value{operand};
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const fn_val = self.getIntrinsic("llvm.ctpop", &.{operand_llvm_ty});
+
const wrong_size_result = self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, "");
const result_ty = self.air.typeOfIndex(inst);
const result_llvm_ty = try self.dg.llvmType(result_ty);
+
+ const target = self.dg.module.getTarget();
+ const bits = operand_ty.intInfo(target).bits;
const result_bits = result_ty.intInfo(target).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
From dc6553d93eb856a1593d43f0d1ea387c74fa7b44 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 14 Feb 2022 20:08:04 -0700
Subject: [PATCH 0201/2031] CI: update download page and langref for 0.9.1
---
ci/srht/index.json | 71 +++++++++++++++++++++++++++++++++++++++++++++
doc/langref.html.in | 2 +-
2 files changed, 72 insertions(+), 1 deletion(-)
diff --git a/ci/srht/index.json b/ci/srht/index.json
index 98e8f4035b..aa0533af39 100644
--- a/ci/srht/index.json
+++ b/ci/srht/index.json
@@ -40,6 +40,77 @@
"size": "{{AARCH64_LINUX_BYTESIZE}}"
}
},
+ "0.9.1": {
+ "date": "2022-02-14",
+ "docs": "https://ziglang.org/documentation/0.9.1/",
+ "stdDocs": "https://ziglang.org/documentation/0.9.1/std/",
+ "notes": "https://ziglang.org/download/0.9.1/release-notes.html",
+ "src": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-0.9.1.tar.xz",
+ "shasum": "38cf4e84481f5facc766ba72783e7462e08d6d29a5d47e3b75c8ee3142485210",
+ "size": "13940828"
+ },
+ "bootstrap": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-bootstrap-0.9.1.tar.xz",
+ "shasum": "0a8e221c71860d8975c15662b3ed3bd863e81c4fe383455a596e5e0e490d6109",
+ "size": "42488812"
+ },
+ "x86_64-freebsd": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-freebsd-x86_64-0.9.1.tar.xz",
+ "shasum": "4e06009bd3ede34b72757eec1b5b291b30aa0d5046dadd16ecb6b34a02411254",
+ "size": "39028848"
+ },
+ "aarch64-linux": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-linux-aarch64-0.9.1.tar.xz",
+ "shasum": "5d99a39cded1870a3fa95d4de4ce68ac2610cca440336cfd252ffdddc2b90e66",
+ "size": "37034860"
+ },
+ "armv7a-linux": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-linux-armv7a-0.9.1.tar.xz",
+ "shasum": "6de64456cb4757a555816611ea697f86fba7681d8da3e1863fa726a417de49be",
+ "size": "37974652"
+ },
+ "i386-linux": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-linux-i386-0.9.1.tar.xz",
+ "shasum": "e776844fecd2e62fc40d94718891057a1dbca1816ff6013369e9a38c874374ca",
+ "size": "44969172"
+ },
+ "riscv64-linux": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-linux-riscv64-0.9.1.tar.xz",
+ "shasum": "208dea53662c2c52777bd9e3076115d2126a4f71aed7f2ff3b8fe224dc3881aa",
+ "size": "39390868"
+ },
+ "x86_64-linux": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-linux-x86_64-0.9.1.tar.xz",
+ "shasum": "be8da632c1d3273f766b69244d80669fe4f5e27798654681d77c992f17c237d7",
+ "size": "41011464"
+ },
+ "aarch64-macos": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-macos-aarch64-0.9.1.tar.xz",
+ "shasum": "8c473082b4f0f819f1da05de2dbd0c1e891dff7d85d2c12b6ee876887d438287",
+ "size": "38995640"
+ },
+ "x86_64-macos": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-macos-x86_64-0.9.1.tar.xz",
+ "shasum": "2d94984972d67292b55c1eb1c00de46580e9916575d083003546e9a01166754c",
+ "size": "43713044"
+ },
+ "i386-windows": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-windows-i386-0.9.1.zip",
+ "shasum": "74a640ed459914b96bcc572183a8db687bed0af08c30d2ea2f8eba03ae930f69",
+ "size": "67929868"
+ },
+ "x86_64-windows": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-windows-x86_64-0.9.1.zip",
+ "shasum": "443da53387d6ae8ba6bac4b3b90e9fef4ecbe545e1c5fa3a89485c36f5c0e3a2",
+ "size": "65047697"
+ },
+ "aarch64-windows": {
+ "tarball": "https://ziglang.org/download/0.9.1/zig-windows-aarch64-0.9.1.zip",
+ "shasum": "621bf95f54dc3ff71466c5faae67479419951d7489e40e87fd26d195825fb842",
+ "size": "61478151"
+ }
+ },
"0.9.0": {
"date": "2021-12-20",
"docs": "https://ziglang.org/documentation/0.9.0/",
diff --git a/doc/langref.html.in b/doc/langref.html.in
index c0b8c9cb48..24b976ee42 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -316,7 +316,7 @@
0.6.0 |
0.7.1 |
0.8.1 |
- 0.9.0 |
+ 0.9.1 |
master