Merge pull request #10862 from ziglang/elf-lower-slices

stage2: native backends: lower const slices
Jakub Konka
2022-02-11 15:02:17 +01:00
committed by GitHub
17 changed files with 411 additions and 162 deletions
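In short: instead of failing with "TODO codegen for const slices", the native backends now route comptime-known slice constants through lowerUnnamedConst, which emits the data as an unnamed constant atom and records a relocation from the atom that references it. A minimal sketch (not verbatim from this PR's tests) of the kind of code this unblocks:

const greeting: []const u8 = "comptime-known slice";

test "const slice lowers on the native backends" {
    // The slice's ptr/len pair is emitted as an unnamed const atom; the
    // machine code reaches it through a relocation on the containing atom.
    try @import("std").testing.expect(greeting.len == 20);
}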

View File

@@ -1617,7 +1617,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.addInst(.{
.tag = .call_extern,
.data = .{ .extern_fn = n_strx },
.data = .{
.extern_fn = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.sym_name = n_strx,
},
},
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@@ -2485,9 +2490,18 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
},
.memory => |addr| {
const owner_decl = self.mod_fn.owner_decl;
// TODO when refactoring LinkBlock, make this into a generic function.
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.local_sym_index,
.elf => owner_decl.link.elf.local_sym_index,
.plan9 => @intCast(u32, owner_decl.link.plan9.sym_index orelse 0),
else => return self.fail("TODO handle aarch64 load memory in {}", .{self.bin_file.tag}),
};
_ = try self.addInst(.{
.tag = .load_memory,
.data = .{ .payload = try self.addExtra(Mir.LoadMemory{
.atom_index = atom_index,
.register = @enumToInt(reg),
.addr = @intCast(u32, addr),
}) },

View File

@@ -537,7 +537,7 @@ fn mirDebugEpilogueBegin(self: *Emit) !void {
fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
assert(emit.mir.instructions.items(.tag)[inst] == .call_extern);
const n_strx = emit.mir.instructions.items(.data)[inst].extern_fn;
const extern_fn = emit.mir.instructions.items(.data)[inst].extern_fn;
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const offset = blk: {
@@ -547,9 +547,10 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk offset;
};
// Add relocation to the atom.
try macho_file.active_decl.?.link.macho.relocs.append(emit.bin_file.allocator, .{
const atom = macho_file.atom_by_index_table.get(extern_fn.atom_index).?;
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
.target = .{ .global = n_strx },
.target = .{ .global = extern_fn.sym_name },
.addend = 0,
.subtractor = null,
.pcrel = true,
@@ -613,10 +614,9 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
));
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// TODO I think the reloc might be in the wrong place.
const decl = macho_file.active_decl.?;
const atom = macho_file.atom_by_index_table.get(load_memory.atom_index).?;
// Page reloc for adrp instruction.
try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
.target = .{ .local = addr },
.addend = 0,
@@ -626,7 +626,7 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
.@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_GOT_LOAD_PAGE21),
});
// Pageoff reloc for the ldr instruction.
try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset + 4,
.target = .{ .local = addr },
.addend = 0,

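For context on the `offset + 4` above: `load_memory` lowers to an adrp/ldr pair on AArch64, and each 4-byte instruction carries its own GOT-load relocation. A hedged sketch of the emitted sequence (mnemonics are illustrative, not taken from the diff):

// adrp x0, _target@GOTPAGE           -> ARM64_RELOC_GOT_LOAD_PAGE21 at `offset`
// ldr  x0, [x0, _target@GOTPAGEOFF]  -> ARM64_RELOC_GOT_LOAD_PAGEOFF12 at `offset + 4`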
View File

@@ -134,7 +134,12 @@ pub const Inst = struct {
/// An extern function
///
/// Used by e.g. call_extern
extern_fn: u32,
extern_fn: struct {
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's string table.
sym_name: u32,
},
/// A 16-bit immediate value.
///
/// Used by e.g. svc
@@ -278,6 +283,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
}
pub const LoadMemory = struct {
atom_index: u32,
register: u32,
addr: u32,
};
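A hedged sketch of the round trip this payload makes between codegen and Emit, using the `extraData` signature shown above (the Emit-side call is an assumption consistent with the previous file's hunks):

// Codegen packs the extra payload and stores its index in the instruction:
const payload = try self.addExtra(Mir.LoadMemory{
    .atom_index = atom_index,
    .register = @enumToInt(reg),
    .addr = @intCast(u32, addr),
});
// Emit later unpacks it by that index:
const load_memory = emit.mir.extraData(Mir.LoadMemory, payload).data;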

View File

@@ -3931,23 +3931,20 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
const slice_len = typed_value.val.sliceLen();
// Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
// the Sema code needs to use anonymous Decls or alloca instructions to store data.
const ptr_imm = ptr_mcv.memory;
_ = slice_len;
_ = ptr_imm;
// We need more general support for const data being stored in memory to make this work.
return self.fail("TODO codegen for const slices", .{});
return self.lowerUnnamedConst(typed_value);
},
else => {
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
switch (typed_value.val.tag()) {
.int_u64 => {
return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
},
.slice => {
return self.lowerUnnamedConst(typed_value);
},
else => {
return self.fail("TODO codegen more kinds of const pointers", .{});
},
}
return self.fail("TODO codegen more kinds of const pointers", .{});
},
},
.Int => {

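To illustrate the two value tags the rewritten `else` branch distinguishes, a pair of hypothetical constants (names and addresses are made up):

// val.tag() == .int_u64: a pointer with a comptime-known integer address
// lowers to MCValue{ .immediate = ... }.
const mmio = @intToPtr(*volatile u32, 0x1000);
// val.tag() == .slice: a comptime-known slice goes through lowerUnnamedConst
// and is referenced via a relocation.
const name: []const u8 = "arm";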
View File

@@ -1897,7 +1897,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.reg1 = addr_reg.to64(),
.flags = flags,
}).encode(),
.data = .{ .linker_sym_index = sym_index },
.data = .{
.load_reloc = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.sym_index = sym_index,
},
},
});
break :blk addr_reg;
},
@@ -2670,7 +2675,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
.data = .{ .extern_fn = n_strx },
.data = .{
.extern_fn = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.sym_name = n_strx,
},
},
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@@ -3514,8 +3524,14 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
else => return self.fail("TODO implement args on stack for {} with abi size > 8", .{mcv}),
}
},
.embedded_in_code => {
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
}
return self.fail("TODO implement args on stack for {} with abi size > 8", .{mcv});
},
.memory,
.embedded_in_code,
.direct_load,
.got_load,
=> {
@@ -3523,7 +3539,63 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
}
return self.fail("TODO implement memcpy for setting args on stack from {}", .{mcv});
self.register_manager.freezeRegs(&.{ .rax, .rcx });
defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx });
const addr_reg: Register = blk: {
switch (mcv) {
.got_load,
.direct_load,
=> |sym_index| {
const flags: u2 = switch (mcv) {
.got_load => 0b00,
.direct_load => 0b01,
else => unreachable,
};
const addr_reg = try self.register_manager.allocReg(null);
_ = try self.addInst(.{
.tag = .lea_pie,
.ops = (Mir.Ops{
.reg1 = addr_reg.to64(),
.flags = flags,
}).encode(),
.data = .{
.load_reloc = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.sym_index = sym_index,
},
},
});
break :blk addr_reg;
},
.memory => |addr| {
const addr_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr });
break :blk addr_reg;
},
else => unreachable,
}
};
self.register_manager.freezeRegs(&.{addr_reg});
defer self.register_manager.unfreezeRegs(&.{addr_reg});
const regs = try self.register_manager.allocRegs(2, .{ null, null });
const count_reg = regs[0];
const tmp_reg = regs[1];
try self.register_manager.getReg(.rax, null);
try self.register_manager.getReg(.rcx, null);
// TODO allow for abi_size to be u64
try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) });
try self.genInlineMemcpy(
-(stack_offset + @intCast(i32, abi_size)),
.rsp,
addr_reg.to64(),
count_reg.to64(),
tmp_reg.to8(),
);
},
.register => |reg| {
_ = try self.addInst(.{
@@ -3710,6 +3782,30 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr });
break :blk reg;
},
.direct_load,
.got_load,
=> |sym_index| {
const flags: u2 = switch (mcv) {
.got_load => 0b00,
.direct_load => 0b01,
else => unreachable,
};
const addr_reg = try self.register_manager.allocReg(null);
_ = try self.addInst(.{
.tag = .lea_pie,
.ops = (Mir.Ops{
.reg1 = addr_reg.to64(),
.flags = flags,
}).encode(),
.data = .{
.load_reloc = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.sym_index = sym_index,
},
},
});
break :blk addr_reg;
},
else => {
return self.fail("TODO implement memcpy for setting stack from {}", .{mcv});
},
@@ -4145,7 +4241,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.reg1 = reg,
.flags = flags,
}).encode(),
.data = .{ .linker_sym_index = sym_index },
.data = .{
.load_reloc = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.sym_index = sym_index,
},
},
});
// MOV reg, [reg]
_ = try self.addInst(.{
@@ -4488,6 +4589,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
}
fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty, tv.val });
const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
@@ -4520,23 +4622,20 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
const slice_len = typed_value.val.sliceLen();
// Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
// the Sema code needs to use anonymous Decls or alloca instructions to store data.
const ptr_imm = ptr_mcv.memory;
_ = slice_len;
_ = ptr_imm;
// We need more general support for const data being stored in memory to make this work.
return self.fail("TODO codegen for const slices", .{});
return self.lowerUnnamedConst(typed_value);
},
else => {
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
switch (typed_value.val.tag()) {
.int_u64 => {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
},
.slice => {
return self.lowerUnnamedConst(typed_value);
},
else => {
return self.fail("TODO codegen more kinds of const pointers: {}", .{typed_value.val.tag()});
},
}
return self.fail("TODO codegen more kinds of const pointers: {}", .{typed_value.val.tag()});
},
},
.Int => {

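For orientation on the `genSetStackArg` hunk above, a hedged restatement of what the inline memcpy arranges (the helper is defined outside this hunk; the byte-at-a-time reading of `tmp_reg.to8()` is an assumption):

// rax/rcx are frozen because the copy loop clobbers them; addr_reg points at
// the source bytes, count_reg holds abi_size, and tmp_reg shuttles bytes into
// the argument's stack slot below rsp.
try self.genInlineMemcpy(
    -(stack_offset + @intCast(i32, abi_size)), // destination slot relative to rsp
    .rsp,
    addr_reg.to64(),
    count_reg.to64(),
    tmp_reg.to8(),
);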
View File

@@ -763,6 +763,7 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .lea_pie);
const ops = Mir.Ops.decode(emit.mir.instructions.items(.ops)[inst]);
const load_reloc = emit.mir.instructions.items(.data)[inst].load_reloc;
// lea reg1, [rip + reloc]
// RM
@@ -772,18 +773,19 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
emit.code,
);
const end_offset = emit.code.items.len;
const sym_index = emit.mir.instructions.items(.data)[inst].linker_sym_index;
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const reloc_type = switch (ops.flags) {
0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => return emit.fail("TODO unused LEA PIE variants 0b10 and 0b11", .{}),
};
const decl = macho_file.active_decl.?;
try decl.link.macho.relocs.append(emit.bin_file.allocator, .{
const atom = macho_file.atom_by_index_table.get(load_reloc.atom_index).?;
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = @intCast(u32, end_offset - 4),
.target = .{ .local = sym_index },
.target = .{ .local = load_reloc.sym_index },
.addend = 0,
.subtractor = null,
.pcrel = true,
@@ -801,17 +803,20 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .call_extern);
const n_strx = emit.mir.instructions.items(.data)[inst].extern_fn;
const extern_fn = emit.mir.instructions.items(.data)[inst].extern_fn;
const offset = blk: {
// callq
try lowerToDEnc(.call_near, 0, emit.code);
break :blk @intCast(u32, emit.code.items.len) - 4;
};
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the atom.
try macho_file.active_decl.?.link.macho.relocs.append(emit.bin_file.allocator, .{
const atom = macho_file.atom_by_index_table.get(extern_fn.atom_index).?;
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
.target = .{ .global = n_strx },
.target = .{ .global = extern_fn.sym_name },
.addend = 0,
.subtractor = null,
.pcrel = true,

View File

@@ -185,7 +185,7 @@ pub const Inst = struct {
/// 0b00 reg1, [rip + reloc] // via GOT emits X86_64_RELOC_GOT relocation
/// 0b01 reg1, [rip + reloc] // direct load emits X86_64_RELOC_SIGNED relocation
/// Notes:
/// * `Data` contains `linker_sym_index`
/// * `Data` contains `load_reloc`
lea_pie,
/// ops flags: form:
@@ -350,10 +350,19 @@ pub const Inst = struct {
/// A 32-bit immediate value.
imm: u32,
/// An extern function.
/// Index into the linker's string table.
extern_fn: u32,
/// Entry in the linker's symbol table.
linker_sym_index: u32,
extern_fn: struct {
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's string table.
sym_name: u32,
},
/// PIE load relocation.
load_reloc: struct {
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's symbol table.
sym_index: u32,
},
/// Index into `extra`. Meaning of what can be found there is context-dependent.
payload: u32,
};
@@ -362,7 +371,7 @@ pub const Inst = struct {
// Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
comptime {
if (builtin.mode != .Debug) {
assert(@sizeOf(Inst) == 8);
assert(@sizeOf(Data) == 8);
}
}
};
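A hedged note on the assert change above: both new payload variants pack two u32 indices, so `Data` itself stays at 8 bytes even though `Inst` (tag + ops + data) no longer fits in 8. A quick check of that size claim (assuming `std` is in scope):

comptime {
    // Two u32 fields pack into exactly 8 bytes with no padding.
    std.debug.assert(@sizeOf(struct { atom_index: u32, sym_name: u32 }) == 8);
}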

View File

@@ -450,6 +450,7 @@ fn mirLea(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
fn mirLeaPie(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const load_reloc = print.mir.instructions.items(.data)[inst].load_reloc;
try w.print("lea {s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
@@ -459,9 +460,8 @@ fn mirLeaPie(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
else => unreachable,
}
try w.print("[rip + 0x0] ", .{});
const sym_index = print.mir.instructions.items(.data)[inst].linker_sym_index;
if (print.bin_file.cast(link.File.MachO)) |macho_file| {
const target = macho_file.locals.items[sym_index];
const target = macho_file.locals.items[load_reloc.sym_index];
const target_name = macho_file.getString(target.n_strx);
try w.print("target@{s}", .{target_name});
} else {

View File

@@ -142,6 +142,7 @@ pub fn generateFunction(
pub fn generateSymbol(
bin_file: *link.File,
parent_atom_index: u32,
src_loc: Module.SrcLoc,
typed_value: TypedValue,
code: *std.ArrayList(u8),
@@ -177,7 +178,7 @@ pub fn generateSymbol(
if (typed_value.ty.sentinel()) |sentinel| {
try code.ensureUnusedCapacity(payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
switch (try generateSymbol(bin_file, src_loc, .{
switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
}, code, debug_output)) {
@@ -197,7 +198,7 @@ pub fn generateSymbol(
const elem_vals = typed_value.val.castTag(.array).?.data;
const elem_ty = typed_value.ty.elemType();
for (elem_vals) |elem_val| {
switch (try generateSymbol(bin_file, src_loc, .{
switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = elem_ty,
.val = elem_val,
}, code, debug_output)) {
@@ -223,20 +224,19 @@ pub fn generateSymbol(
.Pointer => switch (typed_value.val.tag()) {
.variable => {
const decl = typed_value.val.castTag(.variable).?.data.owner_decl;
return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output);
return lowerDeclRef(bin_file, parent_atom_index, src_loc, typed_value, decl, code, debug_output);
},
.decl_ref => {
const decl = typed_value.val.castTag(.decl_ref).?.data;
return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output);
return lowerDeclRef(bin_file, parent_atom_index, src_loc, typed_value, decl, code, debug_output);
},
.slice => {
// TODO populate .debug_info for the slice
const slice = typed_value.val.castTag(.slice).?.data;
// generate ptr
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf);
switch (try generateSymbol(bin_file, src_loc, .{
switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = slice_ptr_field_type,
.val = slice.ptr,
}, code, debug_output)) {
@@ -248,7 +248,7 @@ pub fn generateSymbol(
}
// generate length
switch (try generateSymbol(bin_file, src_loc, .{
switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = Type.initTag(.usize),
.val = slice.len,
}, code, debug_output)) {
@@ -392,7 +392,7 @@ pub fn generateSymbol(
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
switch (try generateSymbol(bin_file, src_loc, .{
switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = field_ty,
.val = field_val,
}, code, debug_output)) {
@@ -447,6 +447,7 @@ pub fn generateSymbol(
fn lowerDeclRef(
bin_file: *link.File,
parent_atom_index: u32,
src_loc: Module.SrcLoc,
typed_value: TypedValue,
decl: *Module.Decl,
@@ -457,7 +458,7 @@ fn lowerDeclRef(
// generate ptr
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf);
switch (try generateSymbol(bin_file, src_loc, .{
switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = slice_ptr_field_type,
.val = typed_value.val,
}, code, debug_output)) {
@@ -473,7 +474,7 @@ fn lowerDeclRef(
.base = .{ .tag = .int_u64 },
.data = typed_value.val.sliceLen(),
};
switch (try generateSymbol(bin_file, src_loc, .{
switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
.ty = Type.initTag(.usize),
.val = Value.initPayload(&slice_len.base),
}, code, debug_output)) {
@@ -496,15 +497,7 @@ fn lowerDeclRef(
}
decl.markAlive();
const vaddr = vaddr: {
if (bin_file.cast(link.File.MachO)) |macho_file| {
break :vaddr try macho_file.getDeclVAddrWithReloc(decl, code.items.len);
}
// TODO handle the dependency of this symbol on the decl's vaddr.
// If the decl changes vaddr, then this symbol needs to get regenerated.
break :vaddr bin_file.getDeclVAddr(decl);
};
const vaddr = try bin_file.getDeclVAddr(decl, parent_atom_index, code.items.len);
const endian = target.cpu.arch.endian();
switch (ptr_width) {
16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian),

View File

@@ -684,12 +684,16 @@ pub const File = struct {
}
}
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
/// Get allocated `Decl`'s address in virtual memory.
/// The linker is passed the containing atom, `parent_atom_index`, and the offset within its
/// memory buffer, `offset`, so that it can make a note of potential relocation sites in case the
/// `Decl`'s address has not yet been resolved, or the containing atom gets moved in virtual memory.
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl),
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
.plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl, parent_atom_index, offset),
.c => unreachable,
.wasm => unreachable,
.spirv => unreachable,

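A minimal sketch of the call-site shape the new signature expects, mirroring `lowerDeclRef` in the codegen hunk above (the 64-bit write is an assumed analog of the 16-bit case shown there):

// The caller identifies where the pointer is being written: which atom
// (parent_atom_index) and at what byte offset within its code buffer.
const vaddr = try bin_file.getDeclVAddr(decl, parent_atom_index, code.items.len);
mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian);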
View File

@@ -726,7 +726,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
const res = try codegen.generateSymbol(&self.base, 0, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl.val,
}, &code_buffer, .none);
@@ -751,7 +751,7 @@ fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []co
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
if (need_realloc) {
const curr_vaddr = self.getDeclVAddr(decl);
const curr_vaddr = self.text_section_virtual_address + decl.link.coff.text_offset;
const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr });
if (vaddr != curr_vaddr) {
@@ -1465,7 +1465,9 @@ fn findLib(self: *Coff, arena: Allocator, name: []const u8) !?[]const u8 {
return null;
}
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
_ = parent_atom_index;
_ = offset;
assert(self.llvm_object == null);
return self.text_section_virtual_address + decl.link.coff.text_offset;
}

View File

@@ -145,6 +145,7 @@ decls: std.AutoHashMapUnmanaged(*Module.Decl, ?u16) = .{},
/// at present owned by Module.Decl.
/// TODO consolidate this.
managed_atoms: std.ArrayListUnmanaged(*TextBlock) = .{},
atom_by_index_table: std.AutoHashMapUnmanaged(u32, *TextBlock) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -179,6 +180,18 @@ dbg_info_decl_free_list: std.AutoHashMapUnmanaged(*TextBlock, void) = .{},
dbg_info_decl_first: ?*TextBlock = null,
dbg_info_decl_last: ?*TextBlock = null,
/// A table of relocations indexed by the `TextBlock` that owns them.
/// Note that once we refactor `TextBlock`'s lifetime and ownership rules,
/// this will become a table keyed by the atom's index in the list of Atoms.
relocs: RelocTable = .{},
const Reloc = struct {
target: u32,
offset: u64,
prev_vaddr: u64,
};
const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc));
const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*TextBlock));
/// When allocating, the ideal_capacity is calculated by
@@ -397,12 +410,36 @@ pub fn deinit(self: *Elf) void {
}
self.unnamed_const_atoms.deinit(self.base.allocator);
}
{
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
relocs.deinit(self.base.allocator);
}
self.relocs.deinit(self.base.allocator);
}
self.atom_by_index_table.deinit(self.base.allocator);
}
pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl) u64 {
pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
assert(self.llvm_object == null);
assert(decl.link.elf.local_sym_index != 0);
return self.local_symbols.items[decl.link.elf.local_sym_index].st_value;
const target = decl.link.elf.local_sym_index;
const vaddr = self.local_symbols.items[target].st_value;
const atom = self.atom_by_index_table.get(parent_atom_index).?;
const gop = try self.relocs.getOrPut(self.base.allocator, atom);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(self.base.allocator, .{
.target = target,
.offset = offset,
.prev_vaddr = vaddr,
});
return vaddr;
}
fn getDebugLineProgramOff(self: Elf) u32 {
@@ -991,6 +1028,41 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
.p64 => 12,
};
{
var it = self.relocs.iterator();
while (it.next()) |entry| {
const atom = entry.key_ptr.*;
const relocs = entry.value_ptr.*;
const source_sym = self.local_symbols.items[atom.local_sym_index];
const source_shdr = self.sections.items[source_sym.st_shndx];
log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)});
for (relocs.items) |*reloc| {
const target_sym = self.local_symbols.items[reloc.target];
const target_vaddr = target_sym.st_value;
if (target_vaddr == reloc.prev_vaddr) continue;
const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
const file_offset = source_shdr.sh_offset + section_offset;
log.debug(" ({x}: [() => 0x{x}] ({s}))", .{
reloc.offset,
target_vaddr,
self.getString(target_sym.st_name),
});
switch (self.ptr_width) {
.p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@intCast(u32, target_vaddr)), file_offset),
.p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset),
}
reloc.prev_vaddr = target_vaddr;
}
}
}
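To make the patch-up arithmetic in the loop above concrete, a hedged sketch with made-up values:

const st_value: u64 = 0x201040; // hypothetical: the atom's symbol vaddr
const sh_addr: u64 = 0x201000; // hypothetical: the section's load address
const sh_offset: u64 = 0x1000; // hypothetical: the section's offset in the file
const reloc_offset: u64 = 0x8; // the reloc's position within the atom
const section_offset = (st_value + reloc_offset) - sh_addr; // 0x48
const file_offset = sh_offset + section_offset; // 0x1048: the new vaddr is pwritten here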
// Unfortunately these have to be buffered and done at the end because ELF does not allow
// mixing local and global symbols within a symbol table.
try self.writeAllGlobalSymbols();
@@ -2508,6 +2580,7 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
log.debug("allocating symbol indexes for {s}", .{decl.name});
decl.link.elf.local_sym_index = try self.allocateLocalSymbol();
try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.elf.local_sym_index, &decl.link.elf);
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.elf.offset_table_index = i;
@@ -2525,6 +2598,7 @@ fn freeUnnamedConsts(self: *Elf, decl: *Module.Decl) void {
self.freeTextBlock(atom, self.phdr_load_ro_index.?);
self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
self.local_symbols.items[atom.local_sym_index].st_info = 0;
_ = self.atom_by_index_table.remove(atom.local_sym_index);
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -2543,11 +2617,11 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
if (decl.link.elf.local_sym_index != 0) {
self.local_symbol_free_list.append(self.base.allocator, decl.link.elf.local_sym_index) catch {};
self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
self.local_symbols.items[decl.link.elf.local_sym_index].st_info = 0;
_ = self.atom_by_index_table.remove(decl.link.elf.local_sym_index);
decl.link.elf.local_sym_index = 0;
self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
}
// TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
// is desired for both.
@@ -2993,7 +3067,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
// TODO implement .debug_info for global variables
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
const res = try codegen.generateSymbol(&self.base, decl.link.elf.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@@ -3028,19 +3102,6 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
}
const unnamed_consts = gop.value_ptr;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = .{},
});
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
return error.AnalysisFail;
},
};
const atom = try self.base.allocator.create(TextBlock);
errdefer self.base.allocator.destroy(atom);
atom.* = TextBlock.empty;
@@ -3056,6 +3117,20 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
log.debug("allocating symbol indexes for {s}", .{name});
atom.local_sym_index = try self.allocateLocalSymbol();
try self.atom_by_index_table.putNoClobber(self.base.allocator, atom.local_sym_index, atom);
const res = try codegen.generateSymbol(&self.base, atom.local_sym_index, decl.srcLoc(), typed_value, &code_buffer, .{
.none = .{},
});
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
return error.AnalysisFail;
},
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const phdr_index = self.phdr_load_ro_index.?;

View File

@@ -40,6 +40,7 @@ const StringIndexContext = std.hash_map.StringIndexContext;
const Trie = @import("MachO/Trie.zig");
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../value.zig").Value;
pub const TextBlock = Atom;
@@ -220,6 +221,7 @@ atoms: std.AutoHashMapUnmanaged(MatchingSection, *Atom) = .{},
/// at present owned by Module.Decl.
/// TODO consolidate this.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -248,12 +250,6 @@ unnamed_const_atoms: UnnamedConstTable = .{},
/// TODO consolidate this.
decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, ?MatchingSection) = .{},
/// Currently active Module.Decl.
/// TODO this might not be necessary if we figure out how to pass Module.Decl instance
/// to codegen.genSetReg() or alternatively move PIE displacement for MCValue{ .memory = x }
/// somewhere else in the codegen.
active_decl: ?*Module.Decl = null,
const Entry = struct {
target: Atom.Relocation.Target,
atom: *Atom,
@@ -3441,6 +3437,8 @@ pub fn deinit(self: *MachO) void {
}
self.unnamed_const_atoms.deinit(self.base.allocator);
}
self.atom_by_index_table.deinit(self.base.allocator);
}
pub fn closeFiles(self: MachO) void {
@@ -3647,6 +3645,7 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
if (decl.link.macho.local_sym_index != 0) return;
decl.link.macho.local_sym_index = try self.allocateLocalSymbol();
try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.macho.local_sym_index, &decl.link.macho);
try self.decls.putNoClobber(self.base.allocator, decl, null);
const got_target = .{ .local = decl.link.macho.local_sym_index };
@@ -3693,8 +3692,6 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
}
}
self.active_decl = decl;
const res = if (debug_buffers) |dbg|
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
.dwarf = .{
@@ -3745,7 +3742,22 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
}
const unnamed_consts = gop.value_ptr;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
const name_str_index = blk: {
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl.name, index });
defer self.base.allocator.free(name);
break :blk try self.makeString(name);
};
const name = self.getString(name_str_index);
log.debug("allocating symbol indexes for {s}", .{name});
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const local_sym_index = try self.allocateLocalSymbol();
const atom = try self.createEmptyAtom(local_sym_index, @sizeOf(u64), math.log2(required_alignment));
try self.atom_by_index_table.putNoClobber(self.base.allocator, local_sym_index, atom);
const res = try codegen.generateSymbol(&self.base, local_sym_index, decl.srcLoc(), typed_value, &code_buffer, .{
.none = .{},
});
const code = switch (res) {
@@ -3758,26 +3770,10 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
},
};
const name_str_index = blk: {
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl.name, index });
defer self.base.allocator.free(name);
break :blk try self.makeString(name);
};
const name = self.getString(name_str_index);
atom.code.clearRetainingCapacity();
try atom.code.appendSlice(self.base.allocator, code);
log.debug("allocating symbol indexes for {s}", .{name});
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const match = (try self.getMatchingSection(.{
.segname = makeStaticString("__TEXT"),
.sectname = makeStaticString("__const"),
.size = code.len,
.@"align" = math.log2(required_alignment),
})).?;
const local_sym_index = try self.allocateLocalSymbol();
const atom = try self.createEmptyAtom(local_sym_index, code.len, math.log2(required_alignment));
mem.copy(u8, atom.code.items, code);
const match = try self.getMatchingSectionAtom(atom, typed_value.ty, typed_value.val);
const addr = try self.allocateAtom(atom, code.len, required_alignment, match);
log.debug("allocated atom for {s} at 0x{x}", .{ name, addr });
@@ -3837,11 +3833,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
}
}
self.active_decl = decl;
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const res = if (debug_buffers) |dbg|
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.link.macho.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@@ -3852,7 +3846,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
},
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.link.macho.local_sym_index, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none);
@@ -3906,13 +3900,11 @@ fn isElemTyPointer(ty: Type) bool {
}
}
fn getMatchingSectionDecl(self: *MachO, decl: *Module.Decl) !MatchingSection {
const code = decl.link.macho.code.items;
const alignment = decl.ty.abiAlignment(self.base.options.target);
fn getMatchingSectionAtom(self: *MachO, atom: *Atom, ty: Type, val: Value) !MatchingSection {
const code = atom.code.items;
const alignment = ty.abiAlignment(self.base.options.target);
const align_log_2 = math.log2(alignment);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
const mode = self.base.options.optimize_mode;
const match: MatchingSection = blk: {
// TODO finish and audit this function
@@ -4021,9 +4013,11 @@ fn getMatchingSectionDecl(self: *MachO, decl: *Module.Decl) !MatchingSection {
},
}
};
const local = self.locals.items[atom.local_sym_index];
const seg = self.load_commands.items[match.seg].segment;
const sect = seg.sections.items[match.sect];
log.debug(" allocating atom in '{s},{s}' ({d},{d})", .{
log.debug(" allocating atom '{s}' in '{s},{s}' ({d},{d})", .{
self.getString(local.n_strx),
sect.segName(),
sect.sectName(),
match.seg,
@@ -4039,7 +4033,7 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
const decl_ptr = self.decls.getPtr(decl).?;
if (decl_ptr.* == null) {
decl_ptr.* = try self.getMatchingSectionDecl(decl);
decl_ptr.* = try self.getMatchingSectionAtom(&decl.link.macho, decl.ty, decl.val);
}
const match = decl_ptr.*.?;
@@ -4288,6 +4282,8 @@ fn freeUnnamedConsts(self: *MachO, decl: *Module.Decl) void {
}, true);
self.locals_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
self.locals.items[atom.local_sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(atom.local_sym_index);
atom.local_sym_index = 0;
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -4314,6 +4310,7 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
}
self.locals.items[decl.link.macho.local_sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(decl.link.macho.local_sym_index);
decl.link.macho.local_sym_index = 0;
}
if (self.d_sym) |*ds| {
@@ -4341,16 +4338,11 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
}
}
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
assert(self.llvm_object == null);
assert(decl.link.macho.local_sym_index != 0);
return self.locals.items[decl.link.macho.local_sym_index].n_value;
}
pub fn getDeclVAddrWithReloc(self: *MachO, decl: *const Module.Decl, offset: u64) !u64 {
assert(decl.link.macho.local_sym_index != 0);
assert(self.active_decl != null);
const atom = &self.active_decl.?.link.macho;
const atom = self.atom_by_index_table.get(parent_atom_index).?;
try atom.relocs.append(self.base.allocator, .{
.offset = @intCast(u32, offset),
.target = .{ .local = decl.link.macho.local_sym_index },

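A hedged before/after sketch of the state this file section removes: relocations used to attach to a linker-global `active_decl`, and now resolve their owning atom from an explicit index (`gpa` and `reloc` below are placeholders):

// Before: implicit global state set by updateFunc/updateDecl.
// try macho_file.active_decl.?.link.macho.relocs.append(gpa, reloc);
// After: the caller names the atom explicitly.
const atom = self.atom_by_index_table.get(parent_atom_index).?;
try atom.relocs.append(self.base.allocator, reloc);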
View File

@@ -302,7 +302,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
// TODO we need the symbol index of the containing atom's symbol in the table of locals
const sym_index = decl.link.plan9.sym_index orelse 0;
const res = try codegen.generateSymbol(&self.base, @intCast(u32, sym_index), decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{ .none = .{} });
@@ -749,7 +751,9 @@ pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
_ = self;
_ = decl;
}
pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl) u64 {
pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, parent_atom_index: u32, offset: u64) !u64 {
_ = parent_atom_index;
_ = offset;
if (decl.ty.zigTypeTag() == .Fn) {
var start = self.bases.text;
var it_file = self.fn_decl_table.iterator();

View File

@@ -38,6 +38,7 @@ test {
_ = @import("behavior/optional.zig");
_ = @import("behavior/prefetch.zig");
_ = @import("behavior/pub_enum.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
_ = @import("behavior/type.zig");
_ = @import("behavior/truncate.zig");
@@ -76,7 +77,6 @@ test {
_ = @import("behavior/pointers.zig");
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/src.zig");
_ = @import("behavior/this.zig");
_ = @import("behavior/try.zig");

View File

@@ -120,14 +120,12 @@ test "return string from function" {
test "hex escape" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello"));
}
test "multiline string" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -140,7 +138,6 @@ test "multiline string" {
test "multiline string comments at start" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
//\\one
@@ -153,7 +150,6 @@ test "multiline string comments at start" {
test "multiline string comments at end" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -166,7 +162,6 @@ test "multiline string comments at end" {
test "multiline string comments in middle" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -179,7 +174,6 @@ test "multiline string comments in middle" {
test "multiline string comments at multiple places" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 =
\\one
@@ -193,14 +187,11 @@ test "multiline string comments at multiple places" {
}
test "string concatenation" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(mem.eql(u8, "OK" ++ " IT " ++ "WORKED", "OK IT WORKED"));
}
test "array mult operator" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
}

View File

@@ -27,7 +27,10 @@ comptime {
}
test "slicing" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var array: [20]i32 = undefined;
array[5] = 1234;
@@ -45,6 +48,8 @@ test "slicing" {
test "const slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
const a = "1234567890";
try expect(a.len == 10);
@@ -56,6 +61,8 @@ test "const slice" {
test "comptime slice of undefined pointer of length 0" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const slice1 = @as([*]i32, undefined)[0..0];
try expect(slice1.len == 0);
const slice2 = @as([*]i32, undefined)[100..100];
@@ -64,6 +71,8 @@ test "comptime slice of undefined pointer of length 0" {
test "implicitly cast array of size 0 to slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var msg = [_]u8{};
try assertLenIsZero(&msg);
}
@@ -74,6 +83,8 @@ fn assertLenIsZero(msg: []const u8) !void {
test "access len index of sentinel-terminated slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var slice: [:0]const u8 = "hello";
@@ -88,6 +99,8 @@ test "access len index of sentinel-terminated slice" {
test "comptime slice of slice preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var buff: [10]u8 = undefined;
buff[0..][0..][0] = 1;
@@ -97,6 +110,8 @@ test "comptime slice of slice preserves comptime var" {
test "slice of type" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var types_array = [_]type{ i32, f64, type };
for (types_array) |T, i| {
@@ -120,6 +135,9 @@ test "slice of type" {
test "generic malloc free" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const a = memAlloc(u8, 10) catch unreachable;
memFree(u8, a);
}
@@ -133,6 +151,8 @@ fn memFree(comptime T: type, memory: []T) void {
test "slice of hardcoded address to pointer" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
const pointer = @intToPtr([*]u8, 0x04)[0..2];
@@ -148,6 +168,8 @@ test "slice of hardcoded address to pointer" {
test "comptime slice of pointer preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var buff: [10]u8 = undefined;
var a = @ptrCast([*]u8, &buff);
@@ -158,6 +180,8 @@ test "comptime slice of pointer preserves comptime var" {
test "comptime pointer cast array and then slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
const ptrA: [*]const u8 = @ptrCast([*]const u8, &array);
@@ -172,6 +196,9 @@ test "comptime pointer cast array and then slice" {
test "slicing zero length array" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const s1 = ""[0..];
const s2 = ([_]u32{})[0..];
try expect(s1.len == 0);
@@ -185,6 +212,8 @@ const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(@ptrToInt(x) == 0x1000);
try expect(x.len == 0x500);
@@ -194,6 +223,9 @@ test "compile time slice of pointer to hard coded address" {
}
test "slice string literal has correct type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
comptime {
try expect(@TypeOf("aoeu"[0..]) == *const [4:0]u8);
const array = [_]i32{ 1, 2, 3, 4 };
@@ -207,6 +239,7 @@ test "slice string literal has correct type" {
test "result location zero sized array inside struct field implicit cast to slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const E = struct {
entries: []u32,
@@ -216,6 +249,9 @@ test "result location zero sized array inside struct field implicit cast to slic
}
test "runtime safety lets us slice from len..len" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var an_array = [_]u8{ 1, 2, 3 };
try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
}
@@ -225,6 +261,9 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
var len: u32 = 10;
var slice = buf[0..len];
@@ -232,6 +271,9 @@ test "C pointer" {
}
test "C pointer slice access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var buf: [10]u32 = [1]u32{42} ** 10;
const c_ptr = @ptrCast([*c]const u32, &buf);
@@ -245,6 +287,8 @@ test "C pointer slice access" {
}
test "comptime slices are disambiguated" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(sliceSum(&[_]u8{ 1, 2 }) == 3);
try expect(sliceSum(&[_]u8{ 3, 4 }) == 7);
}
@@ -258,6 +302,9 @@ fn sliceSum(comptime q: []const u8) i32 {
}
test "slice type with custom alignment" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const LazilyResolvedType = struct {
anything: i32,
};
@@ -269,6 +316,8 @@ test "slice type with custom alignment" {
}
test "obtaining a null terminated slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// here we have a normal array
@@ -294,6 +343,7 @@ test "obtaining a null terminated slice" {
test "empty array to slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -312,6 +362,9 @@ test "empty array to slice" {
}
test "@ptrCast slice to pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -327,6 +380,7 @@ test "@ptrCast slice to pointer" {
test "slice syntax resulting in pointer-to-array" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -475,6 +529,7 @@ test "slice syntax resulting in pointer-to-array" {
test "type coercion of pointer to anon struct literal to pointer to slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
const U = union {
@@ -508,6 +563,7 @@ test "type coercion of pointer to anon struct literal to pointer to slice" {
test "array concat of slices gives slice" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
comptime {
var a: []const u8 = "aoeu";
@@ -519,6 +575,7 @@ test "array concat of slices gives slice" {
test "slice bounds in comptime concatenation" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const bs = comptime blk: {
const b = "........1........";
@@ -535,6 +592,7 @@ test "slice bounds in comptime concatenation" {
test "slice sentinel access at comptime" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
{
const str0 = &[_:0]u8{ '1', '2', '3' };