std.Target: Incorporate the Abi tag in VersionRange.default().
This is necessary to pick out the correct minimum OS version from the std.zig.target.available_libcs list.
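An illustrative sketch of the updated call, assuming the three-argument signature introduced in this diff; the exact minimum versions it yields depend on the entries in std.zig.target.available_libcs:

    const std = @import("std");

    test "defaultVersionRange with an explicit Abi (illustrative)" {
        // Sketch only: .x86_64/.gnu are arbitrary example values.
        const os = std.Target.Os.Tag.defaultVersionRange(.linux, .x86_64, .gnu);
        // The minimum kernel version is at least the generic default and may be
        // raised further by a matching std.zig.target.available_libcs entry.
        try std.testing.expect(os.version_range.linux.range.min.major >= 4);
    }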
lib/compiler/aro/aro/target.zig (vendored)
@@ -719,7 +719,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
 test "alignment functions - smoke test" {
     var target: std.Target = undefined;
     const x86 = std.Target.Cpu.Arch.x86_64;
-    target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86);
+    target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none);
     target.cpu = std.Target.Cpu.baseline(x86, target.os);
     target.abi = std.Target.Abi.default(x86, target.os);
 
@@ -732,7 +732,7 @@ test "alignment functions - smoke test" {
     try std.testing.expect(systemCompiler(target) == .gcc);
 
     const arm = std.Target.Cpu.Arch.arm;
-    target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm);
+    target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
     target.cpu = std.Target.Cpu.baseline(arm, target.os);
     target.abi = std.Target.Abi.default(arm, target.os);
 
@@ -751,7 +751,7 @@ test "target size/align tests" {
     const x86 = std.Target.Cpu.Arch.x86;
     comp.target.cpu.arch = x86;
     comp.target.cpu.model = &std.Target.x86.cpu.i586;
-    comp.target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86);
+    comp.target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none);
     comp.target.abi = std.Target.Abi.gnu;
 
     const tt: Type = .{
@@ -763,7 +763,7 @@ test "target size/align tests" {
 
     const arm = std.Target.Cpu.Arch.arm;
     comp.target.cpu = std.Target.Cpu.Model.toCpu(&std.Target.arm.cpu.cortex_r4, arm);
-    comp.target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm);
+    comp.target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
     comp.target.abi = std.Target.Abi.none;
 
     const ct: Type = .{

lib/std/Target.zig

@@ -148,10 +148,10 @@ pub const Os = struct {
             return (tag == .hurd or tag == .linux) and abi.isGnu();
         }
 
-        pub fn defaultVersionRange(tag: Tag, arch: Cpu.Arch) Os {
+        pub fn defaultVersionRange(tag: Tag, arch: Cpu.Arch, abi: Abi) Os {
            return .{
                .tag = tag,
-               .version_range = VersionRange.default(tag, arch),
+               .version_range = .default(arch, tag, abi),
            };
        }
 
@@ -416,7 +416,7 @@ pub const Os = struct {
 
         /// The default `VersionRange` represents the range that the Zig Standard Library
         /// bases its abstractions on.
-        pub fn default(tag: Tag, arch: Cpu.Arch) VersionRange {
+        pub fn default(arch: Cpu.Arch, tag: Tag, abi: Abi) VersionRange {
             return switch (tag) {
                 .freestanding,
                 .other,
@@ -475,16 +475,26 @@ pub const Os = struct {
                 .linux => .{
                     .linux = .{
                         .range = .{
-                            .min = .{ .major = 4, .minor = 19, .patch = 0 },
+                            .min = blk: {
+                                const default_min: std.SemanticVersion = .{ .major = 4, .minor = 19, .patch = 0 };
+
+                                for (std.zig.target.available_libcs) |libc| {
+                                    if (libc.arch != arch or libc.os != tag or libc.abi != abi) continue;
+
+                                    if (libc.os_ver) |min| {
+                                        if (min.order(default_min) == .gt) break :blk min;
+                                    }
+                                }
+
+                                break :blk default_min;
+                            },
                             .max = .{ .major = 6, .minor = 11, .patch = 5 },
                         },
                         .glibc = blk: {
                             const default_min: std.SemanticVersion = .{ .major = 2, .minor = 28, .patch = 0 };
 
                             for (std.zig.target.available_libcs) |libc| {
-                                // We don't know the ABI here. We can get away with not checking it
-                                // for now, but that may not always remain true.
-                                if (libc.os != tag or libc.arch != arch) continue;
+                                if (libc.os != tag or libc.arch != arch or libc.abi != abi) continue;
 
                                 if (libc.glibc_min) |min| {
                                     if (min.order(default_min) == .gt) break :blk min;
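As an illustrative sketch of what the ABI-aware lookup above means for callers (note the new argument order arch, tag, abi; the concrete minimums come from std.zig.target.available_libcs, so the values here are only a lower bound):

    const std = @import("std");

    test "VersionRange.default is now ABI-aware (illustrative)" {
        const gnu = std.Target.Os.VersionRange.default(.x86_64, .linux, .gnu);
        const musl = std.Target.Os.VersionRange.default(.x86_64, .linux, .musl);
        // Both minimums are at least the generic 4.19 fallback; an available_libcs
        // entry with a newer os_ver for the matching (arch, os, abi) raises it.
        const fallback: std.SemanticVersion = .{ .major = 4, .minor = 19, .patch = 0 };
        try std.testing.expect(gnu.linux.range.min.order(fallback) != .lt);
        try std.testing.expect(musl.linux.range.min.order(fallback) != .lt);
    }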
lib/std/zig/system.zig

@@ -181,8 +181,12 @@ pub const DetectError = error{
 /// components by detecting the native system, and then resolves
 /// standard/default parts relative to that.
 pub fn resolveTargetQuery(query: Target.Query) DetectError!Target {
+    // Until https://github.com/ziglang/zig/issues/4592 is implemented (support detecting the
+    // native CPU architecture as being different than the current target), we use this:
+    const query_cpu_arch = query.cpu_arch orelse builtin.cpu.arch;
     const query_os_tag = query.os_tag orelse builtin.os.tag;
-    var os = query_os_tag.defaultVersionRange(query.cpu_arch orelse builtin.cpu.arch);
+    const query_abi = query.abi orelse builtin.abi;
+    var os = query_os_tag.defaultVersionRange(query_cpu_arch, query_abi);
     if (query.os_tag == null) {
         switch (builtin.target.os.tag) {
             .linux => {
@@ -338,29 +342,58 @@ pub fn resolveTargetQuery(query: Target.Query) DetectError!Target {
         os.version_range.linux.android = android;
     }
 
-    // Until https://github.com/ziglang/zig/issues/4592 is implemented (support detecting the
-    // native CPU architecture as being different than the current target), we use this:
-    const cpu_arch = query.cpu_arch orelse builtin.cpu.arch;
-
     const cpu = switch (query.cpu_model) {
-        .native => detectNativeCpuAndFeatures(cpu_arch, os, query),
-        .baseline => Target.Cpu.baseline(cpu_arch, os),
+        .native => detectNativeCpuAndFeatures(query_cpu_arch, os, query),
+        .baseline => Target.Cpu.baseline(query_cpu_arch, os),
         .determined_by_arch_os => if (query.cpu_arch == null)
-            detectNativeCpuAndFeatures(cpu_arch, os, query)
+            detectNativeCpuAndFeatures(query_cpu_arch, os, query)
         else
-            Target.Cpu.baseline(cpu_arch, os),
-        .explicit => |model| model.toCpu(cpu_arch),
+            Target.Cpu.baseline(query_cpu_arch, os),
+        .explicit => |model| model.toCpu(query_cpu_arch),
     } orelse backup_cpu_detection: {
-        break :backup_cpu_detection Target.Cpu.baseline(cpu_arch, os);
+        break :backup_cpu_detection Target.Cpu.baseline(query_cpu_arch, os);
     };
 
     var result = try detectAbiAndDynamicLinker(cpu, os, query);
+
+    // It's possible that we detect the native ABI, but fail to detect the OS version or were told
+    // to use the default OS version range. In that case, while we can't determine the exact native
+    // OS version, we do at least know that some ABIs require a particular OS version (by way of
+    // `std.zig.target.available_libcs`). So in this case, adjust the OS version to the minimum that
+    // we know is required.
+    if (result.abi != query_abi and query.os_version_min == null) {
+        const result_ver_range = &result.os.version_range;
+        const abi_ver_range = result.os.tag.defaultVersionRange(result.cpu.arch, result.abi).version_range;
+
+        switch (result.os.tag.versionRangeTag()) {
+            .none => {},
+            .semver => if (result_ver_range.semver.min.order(abi_ver_range.semver.min) == .lt) {
+                result_ver_range.semver.min = abi_ver_range.semver.min;
+            },
+            inline .hurd, .linux => |t| {
+                if (@field(result_ver_range, @tagName(t)).range.min.order(@field(abi_ver_range, @tagName(t)).range.min) == .lt) {
+                    @field(result_ver_range, @tagName(t)).range.min = @field(abi_ver_range, @tagName(t)).range.min;
+                }
+
+                if (@field(result_ver_range, @tagName(t)).glibc.order(@field(abi_ver_range, @tagName(t)).glibc) == .lt and
+                    query.glibc_version == null)
+                {
+                    @field(result_ver_range, @tagName(t)).glibc = @field(abi_ver_range, @tagName(t)).glibc;
+                }
+            },
+            .windows => if (!result_ver_range.windows.min.isAtLeast(abi_ver_range.windows.min)) {
+                result_ver_range.windows.min = abi_ver_range.windows.min;
+            },
+        }
+    }
+
     // For x86, we need to populate some CPU feature flags depending on architecture
     // and mode:
     //  * 16bit_mode => if the abi is code16
     //  * 32bit_mode => if the arch is x86
     // However, the "mode" flags can be used as overrides, so if the user explicitly
     // sets one of them, that takes precedence.
-    switch (cpu_arch) {
+    switch (result.cpu.arch) {
         .x86 => {
             if (!Target.x86.featureSetHasAny(query.cpu_features_add, .{
                 .@"16bit_mode", .@"32bit_mode",
@@ -388,12 +421,12 @@ pub fn resolveTargetQuery(query: Target.Query) DetectError!Target {
     }
     updateCpuFeatures(
         &result.cpu.features,
-        cpu_arch.allFeaturesList(),
+        result.cpu.arch.allFeaturesList(),
         query.cpu_features_add,
         query.cpu_features_sub,
     );
 
-    if (cpu_arch == .hexagon) {
+    if (result.cpu.arch == .hexagon) {
         // Both LLVM and LLD have broken support for the small data area. Yet LLVM has the feature
         // on by default for all Hexagon CPUs. Clang sort of solves this by defaulting the `-gpsize`
         // command line parameter for the Hexagon backend to 0, so that no constants get placed in
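An illustrative sketch of how the adjustment added to resolveTargetQuery above can be exercised; an empty query resolves the native target, and when the detected ABI differs from the queried one, the minimum OS version may be raised to what that ABI's available_libcs entry requires:

    const std = @import("std");

    pub fn main() !void {
        // Empty query: everything is detected or defaulted, so the ABI-based
        // minimum-version adjustment can take effect.
        const target = try std.zig.system.resolveTargetQuery(.{});
        std.debug.print("resolved {s}-{s}-{s}\n", .{
            @tagName(target.cpu.arch),
            @tagName(target.os.tag),
            @tagName(target.abi),
        });
    }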