ZIR: add cmp and condbr instructions

This commit is contained in:
Andrew Kelley
2020-04-28 21:04:18 -04:00
parent fb67a7260d
commit 6b0f7de247
7 changed files with 1629 additions and 449 deletions

View File

@@ -986,6 +986,43 @@ pub const Order = enum {
/// Greater than (`>`)
gt,
/// Returns the inverted ordering: if `a.cmp(b)` gives this Order,
/// `b.cmp(a)` gives `invert()` of it.
pub fn invert(self: Order) Order {
    return switch (self) {
        .lt => .gt,
        .eq => .eq,
        // Bug fix: inverting "greater than" must yield "less than";
        // the previous `.gt => .gt` mapped it to itself.
        .gt => .lt,
    };
}
/// Returns whether this ordering satisfies the given comparison operator,
/// e.g. `.lt` satisfies `.lt`, `.lte`, and `.neq` but not `.eq`.
pub fn compare(self: Order, op: CompareOperator) bool {
    // Dispatch on the operator instead of on the ordering: each operator
    // is satisfied by a fixed subset of the three orderings, which
    // collapses the 3x6 truth table into six membership checks.
    return switch (op) {
        .lt => self == .lt,
        .lte => self != .gt,
        .eq => self == .eq,
        .gte => self != .lt,
        .gt => self == .gt,
        .neq => self != .eq,
    };
}
};
/// Given two numbers, this function returns the order they are with respect to each other.

View File

@@ -60,6 +60,13 @@ pub const Int = struct {
return s;
}
/// Hint: use `calcLimbLen` to figure out how big an array to allocate for `limbs`.
/// Initializes a fixed-storage (non-allocating) Int and sets it to `value`.
pub fn initSetFixed(limbs: []Limb, value: var) Int {
var s = Int.initFixed(limbs);
// A fixed Int never allocates, so the Allocator.Error path of `set`
// cannot occur here — the caller is expected to have sized `limbs`
// with `calcLimbLen` (undersized storage panics inside initFixed-backed
// operations instead).
s.set(value) catch unreachable;
return s;
}
/// Creates a new Int with a specific capacity. If capacity < default_capacity then the
/// default capacity will be used instead.
pub fn initCapacity(allocator: *Allocator, capacity: usize) !Int {
@@ -104,12 +111,11 @@ pub const Int = struct {
/// Returns an Int backed by a fixed set of limb values.
/// This is read-only and cannot be used as a result argument. If the Int tries to allocate
/// memory a runtime panic will occur.
pub fn initFixed(limbs: []const Limb) Int {
pub fn initFixed(limbs: []Limb) Int {
var self = Int{
.allocator = null,
.metadata = limbs.len,
// Cast away the const, invalid use to pass as a pointer argument.
.limbs = @intToPtr([*]Limb, @ptrToInt(limbs.ptr))[0..limbs.len],
.limbs = limbs,
};
self.normalize(limbs.len);
@@ -218,7 +224,7 @@ pub const Int = struct {
/// one greater than the returned value.
///
/// e.g. -127 returns 8 as it will fit in an i8. 127 returns 7 since it fits in a u7.
fn bitCountTwosComp(self: Int) usize {
pub fn bitCountTwosComp(self: Int) usize {
var bits = self.bitCountAbs();
// If the entire value has only one bit set (e.g. 0b100000000) then the negation in twos
@@ -267,7 +273,6 @@ pub const Int = struct {
/// Sets an Int to value. Value must be an primitive integer type.
pub fn set(self: *Int, value: var) Allocator.Error!void {
self.assertWritable();
const T = @TypeOf(value);
switch (@typeInfo(T)) {
@@ -598,6 +603,13 @@ pub const Int = struct {
}
}
/// Same as `cmp` but the right-hand operand is a primitive integer.
pub fn orderAgainstScalar(lhs: Int, scalar: var) math.Order {
// Stack-allocate exactly enough limbs for the scalar; `initSetFixed`
// cannot allocate, so this comparison performs no heap allocation.
var limbs: [calcLimbLen(scalar)]Limb = undefined;
const rhs = initSetFixed(&limbs, scalar);
return cmp(lhs, rhs);
}
/// Returns true if a == 0.
pub fn eqZero(a: Int) bool {
// Assumes the Int is normalized (see `normalize` in initFixed), so zero
// is always represented as exactly one limb holding 0.
return a.len() == 1 and a.limbs[0] == 0;
}
@@ -642,6 +654,33 @@ pub const Int = struct {
};
}
/// Returns the number of limbs needed to store `scalar`, which must be a
/// primitive integer value.
pub fn calcLimbLen(scalar: var) usize {
    switch (@typeInfo(@TypeOf(scalar))) {
        .Int => {
            // Bug fix: `@sizeOf` takes a type, not a value — the previous
            // `@sizeOf(scalar)` was a compile error for runtime integers.
            // Round up so integer types smaller than one limb (e.g. u8 on
            // 64-bit targets) still get at least one limb instead of zero.
            return (@sizeOf(@TypeOf(scalar)) + @sizeOf(Limb) - 1) / @sizeOf(Limb);
        },
        .ComptimeInt => {
            const w_value = if (scalar < 0) -scalar else scalar;
            // NOTE(review): `math.log2(0)` is ill-defined, so a comptime
            // `scalar == 0` would fail here — TODO confirm intended handling.
            const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
            return req_limbs;
        },
        else => @compileError("parameter must be a primitive integer type"),
    }
}
/// r = a + scalar
///
/// r and a may be aliases.
/// scalar is a primitive integer type.
///
/// Returns an error if memory could not be allocated.
pub fn addScalar(r: *Int, a: Int, scalar: var) Allocator.Error!void {
// Wrap the scalar in a stack-backed fixed Int so the generic `add`
// can be reused; only `add` itself may allocate (for the result).
var limbs: [calcLimbLen(scalar)]Limb = undefined;
var operand = initFixed(&limbs);
// Fixed storage never allocates, so `set` cannot return Allocator.Error.
operand.set(scalar) catch unreachable;
return add(r, a, operand);
}
/// r = a + b
///
/// r, a and b may be aliases.

View File

@@ -501,11 +501,19 @@ fn Reg(comptime arch: Target.Cpu.Arch) type {
bh,
ch,
dh,
bph,
sph,
sih,
dih,
al,
bl,
cl,
dl,
bpl,
spl,
sil,
dil,
r8b,
r9b,
r10b,

View File

@@ -26,6 +26,10 @@ pub const Inst = struct {
assembly,
ptrtoint,
bitcast,
cmp,
condbr,
isnull,
isnonnull,
};
pub fn cast(base: *Inst, comptime T: type) ?*T {
@@ -41,15 +45,11 @@ pub const Inst = struct {
/// Returns `null` if runtime-known.
pub fn value(base: *Inst) ?Value {
return switch (base.tag) {
.unreach => Value.initTag(.noreturn_value),
.constant => base.cast(Constant).?.val,
if (base.ty.onePossibleValue())
return Value.initTag(.the_one_possible_value);
.assembly,
.ptrtoint,
.bitcast,
=> null,
};
const inst = base.cast(Constant) orelse return null;
return inst.val;
}
pub const Unreach = struct {
@@ -96,6 +96,46 @@ pub const Inst = struct {
operand: *Inst,
},
};
/// Runtime comparison of two operands using a `std.math.CompareOperator`.
pub const Cmp = struct {
pub const base_tag = Tag.cmp;
base: Inst,
args: struct {
lhs: *Inst,
op: std.math.CompareOperator,
rhs: *Inst,
},
};
/// Conditional branch: executes `true_body` or `false_body` depending on
/// the runtime value of `condition` (expected to be a bool).
pub const CondBr = struct {
pub const base_tag = Tag.condbr;
base: Inst,
args: struct {
condition: *Inst,
true_body: Module.Body,
false_body: Module.Body,
},
};
/// Tests whether `operand` (an optional) is null; produces a bool.
pub const IsNull = struct {
pub const base_tag = Tag.isnull;
base: Inst,
args: struct {
operand: *Inst,
},
};
/// Tests whether `operand` (an optional) is non-null; produces a bool.
pub const IsNonNull = struct {
pub const base_tag = Tag.isnonnull;
base: Inst,
args: struct {
operand: *Inst,
},
};
};
pub const TypedValue = struct {
@@ -118,15 +158,19 @@ pub const Module = struct {
pub const Fn = struct {
analysis_status: enum { in_progress, failure, success },
body: []*Inst,
body: Body,
fn_type: Type,
};
/// An ordered list of analyzed instructions forming one body (a function
/// body or one arm of a condbr). Instructions are arena-allocated.
pub const Body = struct {
instructions: []*Inst,
};
pub fn deinit(self: *Module, allocator: *Allocator) void {
allocator.free(self.exports);
allocator.free(self.errors);
for (self.fns) |f| {
allocator.free(f.body);
allocator.free(f.body.instructions);
}
allocator.free(self.fns);
self.arena.deinit();
@@ -192,10 +236,15 @@ const Analyze = struct {
};
const Fn = struct {
body: std.ArrayList(*Inst),
inst_table: std.AutoHashMap(*text.Inst, NewInst),
/// Index into Module fns array
fn_index: usize,
inner_block: Block,
inst_table: std.AutoHashMap(*text.Inst, NewInst),
};
const Block = struct {
func: *Fn,
instructions: std.ArrayList(*Inst),
};
const InnerError = error{ OutOfMemory, AnalysisFail };
@@ -208,9 +257,9 @@ const Analyze = struct {
}
}
fn resolveInst(self: *Analyze, opt_func: ?*Fn, old_inst: *text.Inst) InnerError!*Inst {
if (opt_func) |func| {
if (func.inst_table.get(old_inst)) |kv| {
fn resolveInst(self: *Analyze, opt_block: ?*Block, old_inst: *text.Inst) InnerError!*Inst {
if (opt_block) |block| {
if (block.func.inst_table.get(old_inst)) |kv| {
return kv.value.ptr orelse return error.AnalysisFail;
}
}
@@ -230,12 +279,12 @@ const Analyze = struct {
}
}
fn requireFunctionBody(self: *Analyze, func: ?*Fn, src: usize) !*Fn {
return func orelse return self.fail(src, "instruction illegal outside function body", .{});
fn requireRuntimeBlock(self: *Analyze, block: ?*Block, src: usize) !*Block {
return block orelse return self.fail(src, "instruction illegal outside function body", .{});
}
fn resolveInstConst(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) InnerError!TypedValue {
const new_inst = try self.resolveInst(func, old_inst);
fn resolveInstConst(self: *Analyze, block: ?*Block, old_inst: *text.Inst) InnerError!TypedValue {
const new_inst = try self.resolveInst(block, old_inst);
const val = try self.resolveConstValue(new_inst);
return TypedValue{
.ty = new_inst.ty,
@@ -244,28 +293,39 @@ const Analyze = struct {
}
fn resolveConstValue(self: *Analyze, base: *Inst) !Value {
return base.value() orelse return self.fail(base.src, "unable to resolve comptime value", .{});
return (try self.resolveDefinedValue(base)) orelse
return self.fail(base.src, "unable to resolve comptime value", .{});
}
fn resolveConstString(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) ![]u8 {
const new_inst = try self.resolveInst(func, old_inst);
fn resolveDefinedValue(self: *Analyze, base: *Inst) !?Value {
if (base.value()) |val| {
if (val.isUndef()) {
return self.fail(base.src, "use of undefined value here causes undefined behavior", .{});
}
return val;
}
return null;
}
fn resolveConstString(self: *Analyze, block: ?*Block, old_inst: *text.Inst) ![]u8 {
const new_inst = try self.resolveInst(block, old_inst);
const wanted_type = Type.initTag(.const_slice_u8);
const coerced_inst = try self.coerce(func, wanted_type, new_inst);
const coerced_inst = try self.coerce(block, wanted_type, new_inst);
const val = try self.resolveConstValue(coerced_inst);
return val.toAllocatedBytes(&self.arena.allocator);
}
fn resolveType(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) !Type {
const new_inst = try self.resolveInst(func, old_inst);
fn resolveType(self: *Analyze, block: ?*Block, old_inst: *text.Inst) !Type {
const new_inst = try self.resolveInst(block, old_inst);
const wanted_type = Type.initTag(.@"type");
const coerced_inst = try self.coerce(func, wanted_type, new_inst);
const coerced_inst = try self.coerce(block, wanted_type, new_inst);
const val = try self.resolveConstValue(coerced_inst);
return val.toType();
}
fn analyzeExport(self: *Analyze, func: ?*Fn, export_inst: *text.Inst.Export) !void {
const symbol_name = try self.resolveConstString(func, export_inst.positionals.symbol_name);
const typed_value = try self.resolveInstConst(func, export_inst.positionals.value);
fn analyzeExport(self: *Analyze, block: ?*Block, export_inst: *text.Inst.Export) !void {
const symbol_name = try self.resolveConstString(block, export_inst.positionals.symbol_name);
const typed_value = try self.resolveInstConst(block, export_inst.positionals.value);
switch (typed_value.ty.zigTypeTag()) {
.Fn => {},
@@ -285,18 +345,18 @@ const Analyze = struct {
/// TODO should not need the cast on the last parameter at the callsites
fn addNewInstArgs(
self: *Analyze,
func: *Fn,
block: *Block,
src: usize,
ty: Type,
comptime T: type,
args: Inst.Args(T),
) !*Inst {
const inst = try self.addNewInst(func, src, ty, T);
const inst = try self.addNewInst(block, src, ty, T);
inst.args = args;
return &inst.base;
}
fn addNewInst(self: *Analyze, func: *Fn, src: usize, ty: Type, comptime T: type) !*T {
fn addNewInst(self: *Analyze, block: *Block, src: usize, ty: Type, comptime T: type) !*T {
const inst = try self.arena.allocator.create(T);
inst.* = .{
.base = .{
@@ -306,7 +366,7 @@ const Analyze = struct {
},
.args = undefined,
};
try func.body.append(&inst.base);
try block.instructions.append(&inst.base);
return inst;
}
@@ -349,7 +409,21 @@ const Analyze = struct {
fn constVoid(self: *Analyze, src: usize) !*Inst {
return self.constInst(src, .{
.ty = Type.initTag(.void),
.val = Value.initTag(.void_value),
.val = Value.initTag(.the_one_possible_value),
});
}
/// Creates a comptime-known constant of type `ty` whose value is `undefined`.
fn constUndef(self: *Analyze, src: usize, ty: Type) !*Inst {
return self.constInst(src, .{
.ty = ty,
.val = Value.initTag(.undef),
});
}
/// Creates a comptime-known `bool` constant instruction with value `v`.
fn constBool(self: *Analyze, src: usize, v: bool) !*Inst {
    // Pick the value tag with a branch rather than indexing into a
    // temporary two-element array with @boolToInt.
    const val = if (v) Value.initTag(.bool_true) else Value.initTag(.bool_false);
    return self.constInst(src, .{
        .ty = Type.initTag(.bool),
        .val = val,
    });
}
@@ -399,7 +473,7 @@ const Analyze = struct {
});
}
fn analyzeInst(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) InnerError!*Inst {
fn analyzeInst(self: *Analyze, block: ?*Block, old_inst: *text.Inst) InnerError!*Inst {
switch (old_inst.tag) {
.str => {
// We can use this reference because Inst.Const's Value is arena-allocated.
@@ -411,35 +485,43 @@ const Analyze = struct {
const big_int = old_inst.cast(text.Inst.Int).?.positionals.int;
return self.constIntBig(old_inst.src, Type.initTag(.comptime_int), big_int);
},
.ptrtoint => return self.analyzeInstPtrToInt(func, old_inst.cast(text.Inst.PtrToInt).?),
.fieldptr => return self.analyzeInstFieldPtr(func, old_inst.cast(text.Inst.FieldPtr).?),
.deref => return self.analyzeInstDeref(func, old_inst.cast(text.Inst.Deref).?),
.as => return self.analyzeInstAs(func, old_inst.cast(text.Inst.As).?),
.@"asm" => return self.analyzeInstAsm(func, old_inst.cast(text.Inst.Asm).?),
.@"unreachable" => return self.analyzeInstUnreachable(func, old_inst.cast(text.Inst.Unreachable).?),
.@"fn" => return self.analyzeInstFn(func, old_inst.cast(text.Inst.Fn).?),
.ptrtoint => return self.analyzeInstPtrToInt(block, old_inst.cast(text.Inst.PtrToInt).?),
.fieldptr => return self.analyzeInstFieldPtr(block, old_inst.cast(text.Inst.FieldPtr).?),
.deref => return self.analyzeInstDeref(block, old_inst.cast(text.Inst.Deref).?),
.as => return self.analyzeInstAs(block, old_inst.cast(text.Inst.As).?),
.@"asm" => return self.analyzeInstAsm(block, old_inst.cast(text.Inst.Asm).?),
.@"unreachable" => return self.analyzeInstUnreachable(block, old_inst.cast(text.Inst.Unreachable).?),
.@"fn" => return self.analyzeInstFn(block, old_inst.cast(text.Inst.Fn).?),
.@"export" => {
try self.analyzeExport(func, old_inst.cast(text.Inst.Export).?);
try self.analyzeExport(block, old_inst.cast(text.Inst.Export).?);
return self.constVoid(old_inst.src);
},
.primitive => return self.analyzeInstPrimitive(func, old_inst.cast(text.Inst.Primitive).?),
.fntype => return self.analyzeInstFnType(func, old_inst.cast(text.Inst.FnType).?),
.intcast => return self.analyzeInstIntCast(func, old_inst.cast(text.Inst.IntCast).?),
.bitcast => return self.analyzeInstBitCast(func, old_inst.cast(text.Inst.BitCast).?),
.elemptr => return self.analyzeInstElemPtr(func, old_inst.cast(text.Inst.ElemPtr).?),
.add => return self.analyzeInstAdd(func, old_inst.cast(text.Inst.Add).?),
.primitive => return self.analyzeInstPrimitive(old_inst.cast(text.Inst.Primitive).?),
.fntype => return self.analyzeInstFnType(block, old_inst.cast(text.Inst.FnType).?),
.intcast => return self.analyzeInstIntCast(block, old_inst.cast(text.Inst.IntCast).?),
.bitcast => return self.analyzeInstBitCast(block, old_inst.cast(text.Inst.BitCast).?),
.elemptr => return self.analyzeInstElemPtr(block, old_inst.cast(text.Inst.ElemPtr).?),
.add => return self.analyzeInstAdd(block, old_inst.cast(text.Inst.Add).?),
.cmp => return self.analyzeInstCmp(block, old_inst.cast(text.Inst.Cmp).?),
.condbr => return self.analyzeInstCondBr(block, old_inst.cast(text.Inst.CondBr).?),
.isnull => return self.analyzeInstIsNull(block, old_inst.cast(text.Inst.IsNull).?),
.isnonnull => return self.analyzeInstIsNonNull(block, old_inst.cast(text.Inst.IsNonNull).?),
}
}
fn analyzeInstFn(self: *Analyze, opt_func: ?*Fn, fn_inst: *text.Inst.Fn) InnerError!*Inst {
const fn_type = try self.resolveType(opt_func, fn_inst.positionals.fn_type);
fn analyzeInstFn(self: *Analyze, block: ?*Block, fn_inst: *text.Inst.Fn) InnerError!*Inst {
const fn_type = try self.resolveType(block, fn_inst.positionals.fn_type);
var new_func: Fn = .{
.body = std.ArrayList(*Inst).init(self.allocator),
.inst_table = std.AutoHashMap(*text.Inst, NewInst).init(self.allocator),
.fn_index = self.fns.items.len,
.inner_block = .{
.func = undefined,
.instructions = std.ArrayList(*Inst).init(self.allocator),
},
.inst_table = std.AutoHashMap(*text.Inst, NewInst).init(self.allocator),
};
defer new_func.body.deinit();
new_func.inner_block.func = &new_func;
defer new_func.inner_block.instructions.deinit();
defer new_func.inst_table.deinit();
// Don't hang on to a reference to this when analyzing body instructions, since the memory
// could become invalid.
@@ -449,18 +531,11 @@ const Analyze = struct {
.body = undefined,
};
for (fn_inst.positionals.body.instructions) |src_inst| {
const new_inst = self.analyzeInst(&new_func, src_inst) catch |err| {
self.fns.items[new_func.fn_index].analysis_status = .failure;
try new_func.inst_table.putNoClobber(src_inst, .{ .ptr = null });
return err;
};
try new_func.inst_table.putNoClobber(src_inst, .{ .ptr = new_inst });
}
try self.analyzeBody(&new_func.inner_block, fn_inst.positionals.body);
const f = &self.fns.items[new_func.fn_index];
f.analysis_status = .success;
f.body = new_func.body.toOwnedSlice();
f.body = .{ .instructions = new_func.inner_block.instructions.toOwnedSlice() };
const fn_payload = try self.arena.allocator.create(Value.Payload.Function);
fn_payload.* = .{ .index = new_func.fn_index };
@@ -471,8 +546,8 @@ const Analyze = struct {
});
}
fn analyzeInstFnType(self: *Analyze, func: ?*Fn, fntype: *text.Inst.FnType) InnerError!*Inst {
const return_type = try self.resolveType(func, fntype.positionals.return_type);
fn analyzeInstFnType(self: *Analyze, block: ?*Block, fntype: *text.Inst.FnType) InnerError!*Inst {
const return_type = try self.resolveType(block, fntype.positionals.return_type);
if (return_type.zigTypeTag() == .NoReturn and
fntype.positionals.param_types.len == 0 and
@@ -484,30 +559,30 @@ const Analyze = struct {
return self.fail(fntype.base.src, "TODO implement fntype instruction more", .{});
}
fn analyzeInstPrimitive(self: *Analyze, func: ?*Fn, primitive: *text.Inst.Primitive) InnerError!*Inst {
fn analyzeInstPrimitive(self: *Analyze, primitive: *text.Inst.Primitive) InnerError!*Inst {
return self.constType(primitive.base.src, primitive.positionals.tag.toType());
}
fn analyzeInstAs(self: *Analyze, func: ?*Fn, as: *text.Inst.As) InnerError!*Inst {
const dest_type = try self.resolveType(func, as.positionals.dest_type);
const new_inst = try self.resolveInst(func, as.positionals.value);
return self.coerce(func, dest_type, new_inst);
fn analyzeInstAs(self: *Analyze, block: ?*Block, as: *text.Inst.As) InnerError!*Inst {
const dest_type = try self.resolveType(block, as.positionals.dest_type);
const new_inst = try self.resolveInst(block, as.positionals.value);
return self.coerce(block, dest_type, new_inst);
}
fn analyzeInstPtrToInt(self: *Analyze, func: ?*Fn, ptrtoint: *text.Inst.PtrToInt) InnerError!*Inst {
const ptr = try self.resolveInst(func, ptrtoint.positionals.ptr);
fn analyzeInstPtrToInt(self: *Analyze, block: ?*Block, ptrtoint: *text.Inst.PtrToInt) InnerError!*Inst {
const ptr = try self.resolveInst(block, ptrtoint.positionals.ptr);
if (ptr.ty.zigTypeTag() != .Pointer) {
return self.fail(ptrtoint.positionals.ptr.src, "expected pointer, found '{}'", .{ptr.ty});
}
// TODO handle known-pointer-address
const f = try self.requireFunctionBody(func, ptrtoint.base.src);
const b = try self.requireRuntimeBlock(block, ptrtoint.base.src);
const ty = Type.initTag(.usize);
return self.addNewInstArgs(f, ptrtoint.base.src, ty, Inst.PtrToInt, Inst.Args(Inst.PtrToInt){ .ptr = ptr });
return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, Inst.Args(Inst.PtrToInt){ .ptr = ptr });
}
fn analyzeInstFieldPtr(self: *Analyze, func: ?*Fn, fieldptr: *text.Inst.FieldPtr) InnerError!*Inst {
const object_ptr = try self.resolveInst(func, fieldptr.positionals.object_ptr);
const field_name = try self.resolveConstString(func, fieldptr.positionals.field_name);
fn analyzeInstFieldPtr(self: *Analyze, block: ?*Block, fieldptr: *text.Inst.FieldPtr) InnerError!*Inst {
const object_ptr = try self.resolveInst(block, fieldptr.positionals.object_ptr);
const field_name = try self.resolveConstString(block, fieldptr.positionals.field_name);
const elem_ty = switch (object_ptr.ty.zigTypeTag()) {
.Pointer => object_ptr.ty.elemType(),
@@ -538,9 +613,9 @@ const Analyze = struct {
}
}
fn analyzeInstIntCast(self: *Analyze, func: ?*Fn, intcast: *text.Inst.IntCast) InnerError!*Inst {
const dest_type = try self.resolveType(func, intcast.positionals.dest_type);
const new_inst = try self.resolveInst(func, intcast.positionals.value);
fn analyzeInstIntCast(self: *Analyze, block: ?*Block, intcast: *text.Inst.IntCast) InnerError!*Inst {
const dest_type = try self.resolveType(block, intcast.positionals.dest_type);
const new_inst = try self.resolveInst(block, intcast.positionals.value);
const dest_is_comptime_int = switch (dest_type.zigTypeTag()) {
.ComptimeInt => true,
@@ -564,22 +639,22 @@ const Analyze = struct {
}
if (dest_is_comptime_int or new_inst.value() != null) {
return self.coerce(func, dest_type, new_inst);
return self.coerce(block, dest_type, new_inst);
}
return self.fail(intcast.base.src, "TODO implement analyze widen or shorten int", .{});
}
fn analyzeInstBitCast(self: *Analyze, func: ?*Fn, inst: *text.Inst.BitCast) InnerError!*Inst {
const dest_type = try self.resolveType(func, inst.positionals.dest_type);
const operand = try self.resolveInst(func, inst.positionals.operand);
return self.bitcast(func, dest_type, operand);
fn analyzeInstBitCast(self: *Analyze, block: ?*Block, inst: *text.Inst.BitCast) InnerError!*Inst {
const dest_type = try self.resolveType(block, inst.positionals.dest_type);
const operand = try self.resolveInst(block, inst.positionals.operand);
return self.bitcast(block, dest_type, operand);
}
fn analyzeInstElemPtr(self: *Analyze, func: ?*Fn, inst: *text.Inst.ElemPtr) InnerError!*Inst {
const array_ptr = try self.resolveInst(func, inst.positionals.array_ptr);
const uncasted_index = try self.resolveInst(func, inst.positionals.index);
const elem_index = try self.coerce(func, Type.initTag(.usize), uncasted_index);
fn analyzeInstElemPtr(self: *Analyze, block: ?*Block, inst: *text.Inst.ElemPtr) InnerError!*Inst {
const array_ptr = try self.resolveInst(block, inst.positionals.array_ptr);
const uncasted_index = try self.resolveInst(block, inst.positionals.index);
const elem_index = try self.coerce(block, Type.initTag(.usize), uncasted_index);
if (array_ptr.ty.isSinglePointer() and array_ptr.ty.elemType().zigTypeTag() == .Array) {
if (array_ptr.value()) |array_ptr_val| {
@@ -607,15 +682,19 @@ const Analyze = struct {
return self.fail(inst.base.src, "TODO implement more analyze elemptr", .{});
}
fn analyzeInstAdd(self: *Analyze, func: ?*Fn, inst: *text.Inst.Add) InnerError!*Inst {
const lhs = try self.resolveInst(func, inst.positionals.lhs);
const rhs = try self.resolveInst(func, inst.positionals.rhs);
fn analyzeInstAdd(self: *Analyze, block: ?*Block, inst: *text.Inst.Add) InnerError!*Inst {
const lhs = try self.resolveInst(block, inst.positionals.lhs);
const rhs = try self.resolveInst(block, inst.positionals.rhs);
if (lhs.ty.zigTypeTag() == .Int and rhs.ty.zigTypeTag() == .Int) {
if (lhs.value()) |lhs_val| {
if (rhs.value()) |rhs_val| {
const lhs_bigint = try lhs_val.toBigInt(&self.arena.allocator);
const rhs_bigint = try rhs_val.toBigInt(&self.arena.allocator);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs_val.toBigInt(&lhs_space);
const rhs_bigint = rhs_val.toBigInt(&rhs_space);
var result_bigint = try BigInt.init(&self.arena.allocator);
try BigInt.add(&result_bigint, lhs_bigint, rhs_bigint);
@@ -637,8 +716,8 @@ const Analyze = struct {
return self.fail(inst.base.src, "TODO implement more analyze add", .{});
}
fn analyzeInstDeref(self: *Analyze, func: ?*Fn, deref: *text.Inst.Deref) InnerError!*Inst {
const ptr = try self.resolveInst(func, deref.positionals.ptr);
fn analyzeInstDeref(self: *Analyze, block: ?*Block, deref: *text.Inst.Deref) InnerError!*Inst {
const ptr = try self.resolveInst(block, deref.positionals.ptr);
const elem_ty = switch (ptr.ty.zigTypeTag()) {
.Pointer => ptr.ty.elemType(),
else => return self.fail(deref.positionals.ptr.src, "expected pointer, found '{}'", .{ptr.ty}),
@@ -653,28 +732,28 @@ const Analyze = struct {
return self.fail(deref.base.src, "TODO implement runtime deref", .{});
}
fn analyzeInstAsm(self: *Analyze, func: ?*Fn, assembly: *text.Inst.Asm) InnerError!*Inst {
const return_type = try self.resolveType(func, assembly.positionals.return_type);
const asm_source = try self.resolveConstString(func, assembly.positionals.asm_source);
const output = if (assembly.kw_args.output) |o| try self.resolveConstString(func, o) else null;
fn analyzeInstAsm(self: *Analyze, block: ?*Block, assembly: *text.Inst.Asm) InnerError!*Inst {
const return_type = try self.resolveType(block, assembly.positionals.return_type);
const asm_source = try self.resolveConstString(block, assembly.positionals.asm_source);
const output = if (assembly.kw_args.output) |o| try self.resolveConstString(block, o) else null;
const inputs = try self.arena.allocator.alloc([]const u8, assembly.kw_args.inputs.len);
const clobbers = try self.arena.allocator.alloc([]const u8, assembly.kw_args.clobbers.len);
const args = try self.arena.allocator.alloc(*Inst, assembly.kw_args.args.len);
for (inputs) |*elem, i| {
elem.* = try self.resolveConstString(func, assembly.kw_args.inputs[i]);
elem.* = try self.resolveConstString(block, assembly.kw_args.inputs[i]);
}
for (clobbers) |*elem, i| {
elem.* = try self.resolveConstString(func, assembly.kw_args.clobbers[i]);
elem.* = try self.resolveConstString(block, assembly.kw_args.clobbers[i]);
}
for (args) |*elem, i| {
const arg = try self.resolveInst(func, assembly.kw_args.args[i]);
elem.* = try self.coerce(func, Type.initTag(.usize), arg);
const arg = try self.resolveInst(block, assembly.kw_args.args[i]);
elem.* = try self.coerce(block, Type.initTag(.usize), arg);
}
const f = try self.requireFunctionBody(func, assembly.base.src);
return self.addNewInstArgs(f, assembly.base.src, return_type, Inst.Assembly, Inst.Args(Inst.Assembly){
const b = try self.requireRuntimeBlock(block, assembly.base.src);
return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, Inst.Args(Inst.Assembly){
.asm_source = asm_source,
.is_volatile = assembly.kw_args.@"volatile",
.output = output,
@@ -684,19 +763,350 @@ const Analyze = struct {
});
}
fn analyzeInstUnreachable(self: *Analyze, func: ?*Fn, unreach: *text.Inst.Unreachable) InnerError!*Inst {
const f = try self.requireFunctionBody(func, unreach.base.src);
return self.addNewInstArgs(f, unreach.base.src, Type.initTag(.noreturn), Inst.Unreach, {});
/// Analyzes a ZIR `cmp` instruction. Comptime-known comparisons fold to a
/// constant bool; null/optional equality lowers to isnull/isnonnull; numeric
/// comparisons are delegated to `cmpNumeric`. Several type combinations are
/// still TODO and report analysis errors.
fn analyzeInstCmp(self: *Analyze, block: ?*Block, inst: *text.Inst.Cmp) InnerError!*Inst {
const lhs = try self.resolveInst(block, inst.positionals.lhs);
const rhs = try self.resolveInst(block, inst.positionals.rhs);
const op = inst.positionals.op;
// Only == and != are permitted against null and error sets.
const is_equality_cmp = switch (op) {
.eq, .neq => true,
else => false,
};
const lhs_ty_tag = lhs.ty.zigTypeTag();
const rhs_ty_tag = rhs.ty.zigTypeTag();
if (is_equality_cmp and lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
// null == null, null != null
return self.constBool(inst.base.src, op == .eq);
} else if (is_equality_cmp and
((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or
rhs_ty_tag == .Null and lhs_ty_tag == .Optional))
{
// comparing null with optionals
const opt_operand = if (lhs_ty_tag == .Optional) lhs else rhs;
if (opt_operand.value()) |opt_val| {
// Comptime-known optional: fold to a constant bool.
const is_null = opt_val.isNull();
return self.constBool(inst.base.src, if (op == .eq) is_null else !is_null);
}
// Runtime optional: lower to an isnull / isnonnull instruction.
const b = try self.requireRuntimeBlock(block, inst.base.src);
switch (op) {
.eq => return self.addNewInstArgs(
b,
inst.base.src,
Type.initTag(.bool),
Inst.IsNull,
Inst.Args(Inst.IsNull){ .operand = opt_operand },
),
.neq => return self.addNewInstArgs(
b,
inst.base.src,
Type.initTag(.bool),
Inst.IsNonNull,
Inst.Args(Inst.IsNonNull){ .operand = opt_operand },
),
else => unreachable,
}
} else if (is_equality_cmp and
((lhs_ty_tag == .Null and rhs.ty.isCPtr()) or (rhs_ty_tag == .Null and lhs.ty.isCPtr())))
{
return self.fail(inst.base.src, "TODO implement C pointer cmp", .{});
} else if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
// null compared against a non-optional, non-C-pointer type is an error.
const non_null_type = if (lhs_ty_tag == .Null) rhs.ty else lhs.ty;
return self.fail(inst.base.src, "comparison of '{}' with null", .{non_null_type});
} else if (is_equality_cmp and
((lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) or
(rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union)))
{
return self.fail(inst.base.src, "TODO implement equality comparison between a union's tag value and an enum literal", .{});
} else if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
if (!is_equality_cmp) {
return self.fail(inst.base.src, "{} operator not allowed for errors", .{@tagName(op)});
}
return self.fail(inst.base.src, "TODO implement equality comparison between errors", .{});
} else if (lhs.ty.isNumeric() and rhs.ty.isNumeric()) {
// This operation allows any combination of integer and float types, regardless of the
// signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
// numeric types.
return self.cmpNumeric(block, inst.base.src, lhs, rhs, op);
}
return self.fail(inst.base.src, "TODO implement more cmp analysis", .{});
}
fn coerce(self: *Analyze, func: ?*Fn, dest_type: Type, inst: *Inst) !*Inst {
/// Analyzes a ZIR `isnull` instruction by resolving its operand and
/// delegating to `analyzeIsNull` with logic inverted (true = "is null").
fn analyzeInstIsNull(self: *Analyze, block: ?*Block, inst: *text.Inst.IsNull) InnerError!*Inst {
const operand = try self.resolveInst(block, inst.positionals.operand);
return self.analyzeIsNull(block, inst.base.src, operand, true);
}
/// Analyzes a ZIR `isnonnull` instruction; same as `analyzeInstIsNull`
/// but without logic inversion (true = "is non-null").
fn analyzeInstIsNonNull(self: *Analyze, block: ?*Block, inst: *text.Inst.IsNonNull) InnerError!*Inst {
const operand = try self.resolveInst(block, inst.positionals.operand);
return self.analyzeIsNull(block, inst.base.src, operand, false);
}
/// Analyzes a ZIR `condbr` instruction. If the condition is comptime-known,
/// only the taken arm is analyzed (inline into the current block); otherwise
/// both arms are analyzed into fresh sub-blocks and a runtime CondBr
/// instruction is emitted.
fn analyzeInstCondBr(self: *Analyze, block: ?*Block, inst: *text.Inst.CondBr) InnerError!*Inst {
const uncasted_cond = try self.resolveInst(block, inst.positionals.condition);
const cond = try self.coerce(block, Type.initTag(.bool), uncasted_cond);
if (try self.resolveDefinedValue(cond)) |cond_val| {
// Comptime-known condition: analyze only the selected body directly
// into the enclosing block; no branch instruction is generated.
const body = if (cond_val.toBool()) &inst.positionals.true_body else &inst.positionals.false_body;
try self.analyzeBody(block, body.*);
return self.constVoid(inst.base.src);
}
const parent_block = try self.requireRuntimeBlock(block, inst.base.src);
// Each arm gets its own instruction list; both share the parent's func
// so instruction mappings land in the same inst_table.
var true_block: Block = .{
.func = parent_block.func,
.instructions = std.ArrayList(*Inst).init(self.allocator),
};
defer true_block.instructions.deinit();
try self.analyzeBody(&true_block, inst.positionals.true_body);
var false_block: Block = .{
.func = parent_block.func,
.instructions = std.ArrayList(*Inst).init(self.allocator),
};
defer false_block.instructions.deinit();
try self.analyzeBody(&false_block, inst.positionals.false_body);
// Copy the instruction pointers to the arena memory
const true_instructions = try self.arena.allocator.alloc(*Inst, true_block.instructions.items.len);
const false_instructions = try self.arena.allocator.alloc(*Inst, false_block.instructions.items.len);
mem.copy(*Inst, true_instructions, true_block.instructions.items);
mem.copy(*Inst, false_instructions, false_block.instructions.items);
return self.addNewInstArgs(parent_block, inst.base.src, Type.initTag(.void), Inst.CondBr, Inst.Args(Inst.CondBr){
.condition = cond,
.true_body = .{ .instructions = true_instructions },
.false_body = .{ .instructions = false_instructions },
});
}
/// Analyzes a ZIR `unreachable` instruction: requires a runtime block and
/// emits a noreturn Unreach instruction into it.
fn analyzeInstUnreachable(self: *Analyze, block: ?*Block, unreach: *text.Inst.Unreachable) InnerError!*Inst {
const b = try self.requireRuntimeBlock(block, unreach.base.src);
return self.addNewInstArgs(b, unreach.base.src, Type.initTag(.noreturn), Inst.Unreach, {});
}
/// Analyzes every instruction of `body` in order, recording the mapping from
/// source (text) instruction to analyzed instruction in the function's
/// inst_table so later instructions can resolve their operands.
fn analyzeBody(self: *Analyze, block: ?*Block, body: text.Module.Body) !void {
for (body.instructions) |src_inst| {
// On failure, mark the whole function as failed and record a null
// mapping so subsequent references to this instruction also fail
// (see resolveInst, which maps null to error.AnalysisFail).
const new_inst = self.analyzeInst(block, src_inst) catch |err| {
if (block) |b| {
self.fns.items[b.func.fn_index].analysis_status = .failure;
try b.func.inst_table.putNoClobber(src_inst, .{ .ptr = null });
}
return err;
};
if (block) |b| try b.func.inst_table.putNoClobber(src_inst, .{ .ptr = new_inst });
}
}
/// Shared analysis for isnull/isnonnull; `invert_logic` selects which.
/// Currently unimplemented — always reports an analysis error.
fn analyzeIsNull(
self: *Analyze,
block: ?*Block,
src: usize,
operand: *Inst,
invert_logic: bool,
) InnerError!*Inst {
return self.fail(src, "TODO implement analysis of isnull and isnotnull", .{});
}
/// Asserts that lhs and rhs types are both numeric.
/// Lowers a numeric comparison: folds to a constant bool when both operands
/// are comptime-known; otherwise coerces both operands to a common type and
/// emits a runtime `cmp` instruction. Vector comparisons are validated for
/// matching length but not yet implemented.
fn cmpNumeric(
    self: *Analyze,
    block: ?*Block,
    src: usize,
    lhs: *Inst,
    rhs: *Inst,
    op: std.math.CompareOperator,
) !*Inst {
    assert(lhs.ty.isNumeric());
    assert(rhs.ty.isNumeric());

    const lhs_ty_tag = lhs.ty.zigTypeTag();
    const rhs_ty_tag = rhs.ty.zigTypeTag();

    if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) {
        if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) {
            return self.fail(src, "vector length mismatch: {} and {}", .{
                lhs.ty.arrayLen(),
                rhs.ty.arrayLen(),
            });
        }
        return self.fail(src, "TODO implement support for vectors in cmpNumeric", .{});
    } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) {
        return self.fail(src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{
            lhs.ty,
            rhs.ty,
        });
    }

    // Both operands comptime-known: fold the comparison at compile time.
    if (lhs.value()) |lhs_val| {
        if (rhs.value()) |rhs_val| {
            return self.constBool(src, Value.compare(lhs_val, op, rhs_val));
        }
    }

    // TODO handle comparisons against lazy zero values
    // Some values can be compared against zero without being runtime known or without forcing
    // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to
    // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
    // of this function if we don't need to.

    // It must be a runtime comparison.
    const b = try self.requireRuntimeBlock(block, src);
    // For floats, emit a float comparison instruction.
    const lhs_is_float = switch (lhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    const rhs_is_float = switch (rhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    if (lhs_is_float and rhs_is_float) {
        // Implicit cast the smaller one to the larger one.
        const dest_type = x: {
            if (lhs_ty_tag == .ComptimeFloat) {
                break :x rhs.ty;
            } else if (rhs_ty_tag == .ComptimeFloat) {
                break :x lhs.ty;
            }
            if (lhs.ty.floatBits(self.target) >= rhs.ty.floatBits(self.target)) {
                break :x lhs.ty;
            } else {
                break :x rhs.ty;
            }
        };
        const casted_lhs = try self.coerce(block, dest_type, lhs);
        const casted_rhs = try self.coerce(block, dest_type, rhs);
        // NOTE(review): the result of a comparison is logically `bool`; passing
        // `dest_type` as the instruction's result type looks suspicious -- confirm.
        return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){
            .lhs = casted_lhs,
            .rhs = casted_rhs,
            .op = op,
        });
    }
    // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
    // For mixed signed and unsigned integers, implicit cast both operands to a signed
    // integer with + 1 bit.
    // For mixed floats and integers, extract the integer part from the float, cast that to
    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
    // add/subtract 1.
    const lhs_is_signed = if (lhs.value()) |lhs_val|
        lhs_val.compareWithZero(.lt)
    else
        (lhs.ty.isFloat() or lhs.ty.isSignedInt());
    const rhs_is_signed = if (rhs.value()) |rhs_val|
        rhs_val.compareWithZero(.lt)
    else
        (rhs.ty.isFloat() or rhs.ty.isSignedInt());
    const dest_int_is_signed = lhs_is_signed or rhs_is_signed;

    var dest_float_type: ?Type = null;

    // Compute how many two's-complement bits the lhs needs in the common type.
    var lhs_bits: usize = undefined;
    if (lhs.value()) |lhs_val| {
        if (lhs_val.isUndef())
            return self.constUndef(src, Type.initTag(.bool));
        const is_unsigned = if (lhs_is_float) x: {
            var bigint_space: Value.BigIntSpace = undefined;
            var bigint = lhs_val.toBigInt(&bigint_space);
            const zcmp = lhs_val.orderAgainstZero();
            if (lhs_val.floatHasFraction()) {
                // A fractional value can never equal an integer.
                switch (op) {
                    .eq => return self.constBool(src, false),
                    .neq => return self.constBool(src, true),
                    else => {},
                }
                // Round away from zero so ordering comparisons stay correct
                // after truncating the fractional part.
                if (zcmp == .lt) {
                    try bigint.addScalar(bigint, -1);
                } else {
                    try bigint.addScalar(bigint, 1);
                }
            }
            lhs_bits = bigint.bitCountTwosComp();
            break :x (zcmp != .lt);
        } else x: {
            lhs_bits = lhs_val.intBitCountTwosComp();
            break :x (lhs_val.orderAgainstZero() != .lt);
        };
        // Unsigned value in a signed destination needs one extra bit.
        lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
    } else if (lhs_is_float) {
        dest_float_type = lhs.ty;
    } else {
        const int_info = lhs.ty.intInfo(self.target);
        lhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
    }

    // Same computation for the rhs.
    var rhs_bits: usize = undefined;
    if (rhs.value()) |rhs_val| {
        if (rhs_val.isUndef())
            return self.constUndef(src, Type.initTag(.bool));
        const is_unsigned = if (rhs_is_float) x: {
            var bigint_space: Value.BigIntSpace = undefined;
            var bigint = rhs_val.toBigInt(&bigint_space);
            const zcmp = rhs_val.orderAgainstZero();
            if (rhs_val.floatHasFraction()) {
                switch (op) {
                    .eq => return self.constBool(src, false),
                    .neq => return self.constBool(src, true),
                    else => {},
                }
                if (zcmp == .lt) {
                    try bigint.addScalar(bigint, -1);
                } else {
                    try bigint.addScalar(bigint, 1);
                }
            }
            rhs_bits = bigint.bitCountTwosComp();
            break :x (zcmp != .lt);
        } else x: {
            rhs_bits = rhs_val.intBitCountTwosComp();
            break :x (rhs_val.orderAgainstZero() != .lt);
        };
        rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
    } else if (rhs_is_float) {
        dest_float_type = rhs.ty;
    } else {
        const int_info = rhs.ty.intInfo(self.target);
        rhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
    }

    const dest_type = if (dest_float_type) |ft| ft else blk: {
        const max_bits = std.math.max(lhs_bits, rhs_bits);
        const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) {
            error.Overflow => return self.fail(src, "{} exceeds maximum integer bit count", .{max_bits}),
        };
        break :blk try self.makeIntType(dest_int_is_signed, casted_bits);
    };
    const casted_lhs = try self.coerce(block, dest_type, lhs);
    // Fixed: previously this line coerced `lhs` a second time, so the runtime
    // comparison silently ignored the right-hand operand.
    const casted_rhs = try self.coerce(block, dest_type, rhs);

    // NOTE(review): as above, a `bool` result type may be intended here
    // instead of `dest_type` -- confirm.
    return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){
        .lhs = casted_lhs,
        .rhs = casted_rhs,
        .op = op,
    });
}
/// Creates an arbitrary-width integer type (iN / uN) whose payload lives in
/// this Analyze's arena.
fn makeIntType(self: *Analyze, signed: bool, bits: u16) !Type {
    if (!signed) {
        const payload = try self.arena.allocator.create(Type.Payload.IntUnsigned);
        payload.* = .{ .bits = bits };
        return Type.initPayload(&payload.base);
    }
    const payload = try self.arena.allocator.create(Type.Payload.IntSigned);
    payload.* = .{ .bits = bits };
    return Type.initPayload(&payload.base);
}
fn coerce(self: *Analyze, block: ?*Block, dest_type: Type, inst: *Inst) !*Inst {
// If the types are the same, we can return the operand.
if (dest_type.eql(inst.ty))
return inst;
const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty);
if (in_memory_result == .ok) {
return self.bitcast(func, dest_type, inst);
return self.bitcast(block, dest_type, inst);
}
// *[N]T to []T
@@ -740,14 +1150,14 @@ const Analyze = struct {
return self.fail(inst.src, "TODO implement type coercion from {} to {}", .{ inst.ty, dest_type });
}
fn bitcast(self: *Analyze, func: ?*Fn, dest_type: Type, inst: *Inst) !*Inst {
fn bitcast(self: *Analyze, block: ?*Block, dest_type: Type, inst: *Inst) !*Inst {
if (inst.value()) |val| {
// Keep the comptime Value representation; take the new type.
return self.constInst(inst.src, .{ .ty = dest_type, .val = val });
}
// TODO validate the type size and other compile errors
const f = try self.requireFunctionBody(func, inst.src);
return self.addNewInstArgs(f, inst.src, dest_type, Inst.BitCast, Inst.Args(Inst.BitCast){ .operand = inst });
const b = try self.requireRuntimeBlock(block, inst.src);
return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, Inst.Args(Inst.BitCast){ .operand = inst });
}
fn coerceArrayPtrToSlice(self: *Analyze, dest_type: Type, inst: *Inst) !*Inst {
@@ -831,17 +1241,31 @@ pub fn main() anyerror!void {
try bos.flush();
}
// executable
//const link = @import("link.zig");
//var result = try link.updateExecutableFilePath(allocator, analyzed_module, std.fs.cwd(), "a.out");
//defer result.deinit(allocator);
//if (result.errors.len != 0) {
// for (result.errors) |err_msg| {
// const loc = std.zig.findLineColumn(source, err_msg.byte_offset);
// std.debug.warn("{}:{}:{}: error: {}\n", .{ src_path, loc.line + 1, loc.column + 1, err_msg.msg });
// }
// if (debug_error_trace) return error.LinkFailure;
// std.process.exit(1);
//}
// object file
const link = @import("link.zig");
var result = try link.updateExecutableFilePath(allocator, analyzed_module, std.fs.cwd(), "a.out");
defer result.deinit(allocator);
if (result.errors.len != 0) {
for (result.errors) |err_msg| {
const loc = std.zig.findLineColumn(source, err_msg.byte_offset);
std.debug.warn("{}:{}:{}: error: {}\n", .{ src_path, loc.line + 1, loc.column + 1, err_msg.msg });
}
if (debug_error_trace) return error.LinkFailure;
std.process.exit(1);
}
//var result = try link.updateExecutableFilePath(allocator, analyzed_module, std.fs.cwd(), "a.out");
//defer result.deinit(allocator);
//if (result.errors.len != 0) {
// for (result.errors) |err_msg| {
// const loc = std.zig.findLineColumn(source, err_msg.byte_offset);
// std.debug.warn("{}:{}:{}: error: {}\n", .{ src_path, loc.line + 1, loc.column + 1, err_msg.msg });
// }
// if (debug_error_trace) return error.LinkFailure;
// std.process.exit(1);
//}
}
// Performance optimization ideas:

View File

@@ -34,6 +34,10 @@ pub const Inst = struct {
bitcast,
elemptr,
add,
cmp,
condbr,
isnull,
isnonnull,
};
pub fn TagToType(tag: Tag) type {
@@ -54,6 +58,10 @@ pub const Inst = struct {
.bitcast => BitCast,
.elemptr => ElemPtr,
.add => Add,
.cmp => Cmp,
.condbr => CondBr,
.isnull => IsNull,
.isnonnull => IsNonNull,
};
}
@@ -157,13 +165,9 @@ pub const Inst = struct {
positionals: struct {
fn_type: *Inst,
body: Body,
body: Module.Body,
},
kw_args: struct {},
pub const Body = struct {
instructions: []*Inst,
};
};
pub const Export = struct {
@@ -297,6 +301,50 @@ pub const Inst = struct {
},
kw_args: struct {},
};
/// ZIR comparison instruction: applies `op` to `lhs` and `rhs`.
/// The order of the `positionals` fields is significant to the text format.
pub const Cmp = struct {
    pub const base_tag = Tag.cmp;
    base: Inst,

    positionals: struct {
        lhs: *Inst,
        op: std.math.CompareOperator,
        rhs: *Inst,
    },
    kw_args: struct {},
};
/// ZIR conditional branch: evaluates `condition` and executes exactly one of
/// the two nested bodies.
pub const CondBr = struct {
    pub const base_tag = Tag.condbr;
    base: Inst,

    positionals: struct {
        condition: *Inst,
        true_body: Module.Body,
        false_body: Module.Body,
    },
    kw_args: struct {},
};
/// ZIR instruction testing whether `operand` is null.
pub const IsNull = struct {
    pub const base_tag = Tag.isnull;
    base: Inst,

    positionals: struct {
        operand: *Inst,
    },
    kw_args: struct {},
};
/// ZIR instruction testing whether `operand` is non-null (inverse of IsNull).
pub const IsNonNull = struct {
    pub const base_tag = Tag.isnonnull;
    base: Inst,

    positionals: struct {
        operand: *Inst,
    },
    kw_args: struct {},
};
};
pub const ErrorMsg = struct {
@@ -309,6 +357,10 @@ pub const Module = struct {
errors: []ErrorMsg,
arena: std.heap.ArenaAllocator,
/// An ordered sequence of instructions, used both for function bodies and
/// for the sub-bodies of control-flow instructions such as `condbr`.
pub const Body = struct {
    instructions: []*Inst,
};
pub fn deinit(self: *Module, allocator: *Allocator) void {
allocator.free(self.decls);
allocator.free(self.errors);
@@ -321,7 +373,7 @@ pub const Module = struct {
self.writeToStream(std.heap.page_allocator, std.io.getStdErr().outStream()) catch {};
}
const InstPtrTable = std.AutoHashMap(*Inst, struct { index: usize, fn_body: ?*Inst.Fn.Body });
const InstPtrTable = std.AutoHashMap(*Inst, struct { index: usize, fn_body: ?*Module.Body });
/// The allocator is used for temporary storage, but this function always returns
/// with no resources allocated.
@@ -373,6 +425,10 @@ pub const Module = struct {
.bitcast => return self.writeInstToStreamGeneric(stream, .bitcast, decl, inst_table),
.elemptr => return self.writeInstToStreamGeneric(stream, .elemptr, decl, inst_table),
.add => return self.writeInstToStreamGeneric(stream, .add, decl, inst_table),
.cmp => return self.writeInstToStreamGeneric(stream, .cmp, decl, inst_table),
.condbr => return self.writeInstToStreamGeneric(stream, .condbr, decl, inst_table),
.isnull => return self.writeInstToStreamGeneric(stream, .isnull, decl, inst_table),
.isnonnull => return self.writeInstToStreamGeneric(stream, .isnonnull, decl, inst_table),
}
}
@@ -432,7 +488,7 @@ pub const Module = struct {
}
try stream.writeByte(']');
},
Inst.Fn.Body => {
Module.Body => {
try stream.writeAll("{\n");
for (param.instructions) |inst, i| {
try stream.print(" %{} ", .{i});
@@ -497,7 +553,7 @@ const Parser = struct {
name_map: std.StringHashMap(usize),
};
fn parseBody(self: *Parser) !Inst.Fn.Body {
fn parseBody(self: *Parser) !Module.Body {
var body_context = Body{
.instructions = std.ArrayList(*Inst).init(self.allocator),
.name_map = std.StringHashMap(usize).init(self.allocator),
@@ -535,7 +591,7 @@ const Parser = struct {
// Move the instructions to the arena
const instrs = try self.arena.allocator.alloc(*Inst, body_context.instructions.items.len);
mem.copy(*Inst, instrs, body_context.instructions.items);
return Inst.Fn.Body{ .instructions = instrs };
return Module.Body{ .instructions = instrs };
}
fn parseStringLiteral(self: *Parser) ![]u8 {
@@ -754,7 +810,7 @@ const Parser = struct {
};
}
switch (T) {
Inst.Fn.Body => return parseBody(self),
Module.Body => return parseBody(self),
bool => {
const bool_value = switch (self.source[self.i]) {
'0' => false,
@@ -880,11 +936,12 @@ const EmitZIR = struct {
}
fn emitComptimeIntVal(self: *EmitZIR, src: usize, val: Value) !*Inst {
const big_int_space = try self.arena.allocator.create(Value.BigIntSpace);
const int_inst = try self.arena.allocator.create(Inst.Int);
int_inst.* = .{
.base = .{ .src = src, .tag = Inst.Int.base_tag },
.positionals = .{
.int = try val.toBigInt(&self.arena.allocator),
.int = val.toBigInt(big_int_space),
},
.kw_args = .{},
};
@@ -939,85 +996,7 @@ const EmitZIR = struct {
var instructions = std.ArrayList(*Inst).init(self.allocator);
defer instructions.deinit();
for (module_fn.body) |inst| {
const new_inst = switch (inst.tag) {
.unreach => blk: {
const unreach_inst = try self.arena.allocator.create(Inst.Unreachable);
unreach_inst.* = .{
.base = .{ .src = inst.src, .tag = Inst.Unreachable.base_tag },
.positionals = .{},
.kw_args = .{},
};
break :blk &unreach_inst.base;
},
.constant => unreachable, // excluded from function bodies
.assembly => blk: {
const old_inst = inst.cast(ir.Inst.Assembly).?;
const new_inst = try self.arena.allocator.create(Inst.Asm);
const inputs = try self.arena.allocator.alloc(*Inst, old_inst.args.inputs.len);
for (inputs) |*elem, i| {
elem.* = try self.emitStringLiteral(inst.src, old_inst.args.inputs[i]);
}
const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.args.clobbers.len);
for (clobbers) |*elem, i| {
elem.* = try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i]);
}
const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
for (args) |*elem, i| {
elem.* = try self.resolveInst(&inst_table, old_inst.args.args[i]);
}
new_inst.* = .{
.base = .{ .src = inst.src, .tag = Inst.Asm.base_tag },
.positionals = .{
.asm_source = try self.emitStringLiteral(inst.src, old_inst.args.asm_source),
.return_type = try self.emitType(inst.src, inst.ty),
},
.kw_args = .{
.@"volatile" = old_inst.args.is_volatile,
.output = if (old_inst.args.output) |o|
try self.emitStringLiteral(inst.src, o)
else
null,
.inputs = inputs,
.clobbers = clobbers,
.args = args,
},
};
break :blk &new_inst.base;
},
.ptrtoint => blk: {
const old_inst = inst.cast(ir.Inst.PtrToInt).?;
const new_inst = try self.arena.allocator.create(Inst.PtrToInt);
new_inst.* = .{
.base = .{ .src = inst.src, .tag = Inst.PtrToInt.base_tag },
.positionals = .{
.ptr = try self.resolveInst(&inst_table, old_inst.args.ptr),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.bitcast => blk: {
const old_inst = inst.cast(ir.Inst.BitCast).?;
const new_inst = try self.arena.allocator.create(Inst.BitCast);
new_inst.* = .{
.base = .{ .src = inst.src, .tag = Inst.BitCast.base_tag },
.positionals = .{
.dest_type = try self.emitType(inst.src, inst.ty),
.operand = try self.resolveInst(&inst_table, old_inst.args.operand),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
};
try instructions.append(new_inst);
try inst_table.putNoClobber(inst, new_inst);
}
try self.emitBody(module_fn.body, &inst_table, &instructions);
const fn_type = try self.emitType(src, module_fn.fn_type);
@@ -1039,6 +1018,155 @@ const EmitZIR = struct {
}
}
/// Lowers one analyzed IR body into ZIR instructions.
/// Each new instruction is appended to `instructions` and recorded in
/// `inst_table` (old IR inst -> new ZIR inst) so later operands can be
/// resolved via `resolveInst`. Recurses into the sub-bodies of `condbr`.
fn emitBody(
    self: *EmitZIR,
    body: ir.Module.Body,
    inst_table: *std.AutoHashMap(*ir.Inst, *Inst),
    instructions: *std.ArrayList(*Inst),
) Allocator.Error!void {
    for (body.instructions) |inst| {
        const new_inst = switch (inst.tag) {
            .unreach => blk: {
                const unreach_inst = try self.arena.allocator.create(Inst.Unreachable);
                unreach_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.Unreachable.base_tag },
                    .positionals = .{},
                    .kw_args = .{},
                };
                break :blk &unreach_inst.base;
            },
            .constant => unreachable, // excluded from function bodies
            .assembly => blk: {
                const old_inst = inst.cast(ir.Inst.Assembly).?;
                const new_inst = try self.arena.allocator.create(Inst.Asm);

                // Inputs and clobbers are re-emitted as string literal
                // instructions; argument operands are looked up in inst_table.
                const inputs = try self.arena.allocator.alloc(*Inst, old_inst.args.inputs.len);
                for (inputs) |*elem, i| {
                    elem.* = try self.emitStringLiteral(inst.src, old_inst.args.inputs[i]);
                }

                const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.args.clobbers.len);
                for (clobbers) |*elem, i| {
                    elem.* = try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i]);
                }

                const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
                for (args) |*elem, i| {
                    elem.* = try self.resolveInst(inst_table, old_inst.args.args[i]);
                }

                new_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.Asm.base_tag },
                    .positionals = .{
                        .asm_source = try self.emitStringLiteral(inst.src, old_inst.args.asm_source),
                        .return_type = try self.emitType(inst.src, inst.ty),
                    },
                    .kw_args = .{
                        .@"volatile" = old_inst.args.is_volatile,
                        .output = if (old_inst.args.output) |o|
                            try self.emitStringLiteral(inst.src, o)
                        else
                            null,
                        .inputs = inputs,
                        .clobbers = clobbers,
                        .args = args,
                    },
                };
                break :blk &new_inst.base;
            },
            .ptrtoint => blk: {
                const old_inst = inst.cast(ir.Inst.PtrToInt).?;
                const new_inst = try self.arena.allocator.create(Inst.PtrToInt);
                new_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.PtrToInt.base_tag },
                    .positionals = .{
                        .ptr = try self.resolveInst(inst_table, old_inst.args.ptr),
                    },
                    .kw_args = .{},
                };
                break :blk &new_inst.base;
            },
            .bitcast => blk: {
                const old_inst = inst.cast(ir.Inst.BitCast).?;
                const new_inst = try self.arena.allocator.create(Inst.BitCast);
                new_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.BitCast.base_tag },
                    .positionals = .{
                        .dest_type = try self.emitType(inst.src, inst.ty),
                        .operand = try self.resolveInst(inst_table, old_inst.args.operand),
                    },
                    .kw_args = .{},
                };
                break :blk &new_inst.base;
            },
            .cmp => blk: {
                const old_inst = inst.cast(ir.Inst.Cmp).?;
                const new_inst = try self.arena.allocator.create(Inst.Cmp);
                new_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.Cmp.base_tag },
                    .positionals = .{
                        .lhs = try self.resolveInst(inst_table, old_inst.args.lhs),
                        .rhs = try self.resolveInst(inst_table, old_inst.args.rhs),
                        .op = old_inst.args.op,
                    },
                    .kw_args = .{},
                };
                break :blk &new_inst.base;
            },
            .condbr => blk: {
                const old_inst = inst.cast(ir.Inst.CondBr).?;

                // Recursively lower both branch bodies before building the
                // condbr node itself.
                var true_body = std.ArrayList(*Inst).init(self.allocator);
                var false_body = std.ArrayList(*Inst).init(self.allocator);

                defer true_body.deinit();
                defer false_body.deinit();

                try self.emitBody(old_inst.args.true_body, inst_table, &true_body);
                try self.emitBody(old_inst.args.false_body, inst_table, &false_body);

                const new_inst = try self.arena.allocator.create(Inst.CondBr);

                // NOTE(review): toOwnedSlice() memory comes from self.allocator,
                // while the instruction node lives in the arena -- confirm who
                // frees these body slices.
                new_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.CondBr.base_tag },
                    .positionals = .{
                        .condition = try self.resolveInst(inst_table, old_inst.args.condition),
                        .true_body = .{ .instructions = true_body.toOwnedSlice() },
                        .false_body = .{ .instructions = false_body.toOwnedSlice() },
                    },
                    .kw_args = .{},
                };
                break :blk &new_inst.base;
            },
            .isnull => blk: {
                const old_inst = inst.cast(ir.Inst.IsNull).?;
                const new_inst = try self.arena.allocator.create(Inst.IsNull);
                new_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.IsNull.base_tag },
                    .positionals = .{
                        .operand = try self.resolveInst(inst_table, old_inst.args.operand),
                    },
                    .kw_args = .{},
                };
                break :blk &new_inst.base;
            },
            .isnonnull => blk: {
                const old_inst = inst.cast(ir.Inst.IsNonNull).?;
                const new_inst = try self.arena.allocator.create(Inst.IsNonNull);
                new_inst.* = .{
                    .base = .{ .src = inst.src, .tag = Inst.IsNonNull.base_tag },
                    .positionals = .{
                        .operand = try self.resolveInst(inst_table, old_inst.args.operand),
                    },
                    .kw_args = .{},
                };
                break :blk &new_inst.base;
            },
        };
        try instructions.append(new_inst);
        try inst_table.putNoClobber(inst, new_inst);
    }
}
fn emitType(self: *EmitZIR, src: usize, ty: Type) Allocator.Error!*Inst {
switch (ty.tag()) {
.isize => return self.emitPrimitiveType(src, .isize),

View File

@@ -20,35 +20,37 @@ pub const Type = extern union {
pub fn zigTypeTag(self: Type) std.builtin.TypeId {
switch (self.tag()) {
.@"u8",
.@"i8",
.@"isize",
.@"usize",
.@"c_short",
.@"c_ushort",
.@"c_int",
.@"c_uint",
.@"c_long",
.@"c_ulong",
.@"c_longlong",
.@"c_ulonglong",
.@"c_longdouble",
.u8,
.i8,
.isize,
.usize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.int_signed,
.int_unsigned,
=> return .Int,
.@"f16",
.@"f32",
.@"f64",
.@"f128",
.f16,
.f32,
.f64,
.f128,
=> return .Float,
.@"c_void" => return .Opaque,
.@"bool" => return .Bool,
.@"void" => return .Void,
.@"type" => return .Type,
.@"anyerror" => return .ErrorSet,
.@"comptime_int" => return .ComptimeInt,
.@"comptime_float" => return .ComptimeFloat,
.@"noreturn" => return .NoReturn,
.c_void => return .Opaque,
.bool => return .Bool,
.void => return .Void,
.type => return .Type,
.anyerror => return .ErrorSet,
.comptime_int => return .ComptimeInt,
.comptime_float => return .ComptimeFloat,
.noreturn => return .NoReturn,
.fn_naked_noreturn_no_args => return .Fn,
@@ -153,31 +155,31 @@ pub const Type = extern union {
while (true) {
const t = ty.tag();
switch (t) {
.@"u8",
.@"i8",
.@"isize",
.@"usize",
.@"c_short",
.@"c_ushort",
.@"c_int",
.@"c_uint",
.@"c_long",
.@"c_ulong",
.@"c_longlong",
.@"c_ulonglong",
.@"c_longdouble",
.@"c_void",
.@"f16",
.@"f32",
.@"f64",
.@"f128",
.@"bool",
.@"void",
.@"type",
.@"anyerror",
.@"comptime_int",
.@"comptime_float",
.@"noreturn",
.u8,
.i8,
.isize,
.usize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.c_void,
.f16,
.f32,
.f64,
.f128,
.bool,
.void,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.noreturn,
=> return out_stream.writeAll(@tagName(t)),
.const_slice_u8 => return out_stream.writeAll("[]const u8"),
@@ -200,6 +202,14 @@ pub const Type = extern union {
ty = payload.pointee_type;
continue;
},
.int_signed => {
const payload = @fieldParentPtr(Payload.IntSigned, "base", ty.ptr_otherwise);
return out_stream.print("i{}", .{payload.bits});
},
.int_unsigned => {
const payload = @fieldParentPtr(Payload.IntUnsigned, "base", ty.ptr_otherwise);
return out_stream.print("u{}", .{payload.bits});
},
}
unreachable;
}
@@ -207,31 +217,31 @@ pub const Type = extern union {
pub fn toValue(self: Type, allocator: *Allocator) Allocator.Error!Value {
switch (self.tag()) {
.@"u8" => return Value.initTag(.u8_type),
.@"i8" => return Value.initTag(.i8_type),
.@"isize" => return Value.initTag(.isize_type),
.@"usize" => return Value.initTag(.usize_type),
.@"c_short" => return Value.initTag(.c_short_type),
.@"c_ushort" => return Value.initTag(.c_ushort_type),
.@"c_int" => return Value.initTag(.c_int_type),
.@"c_uint" => return Value.initTag(.c_uint_type),
.@"c_long" => return Value.initTag(.c_long_type),
.@"c_ulong" => return Value.initTag(.c_ulong_type),
.@"c_longlong" => return Value.initTag(.c_longlong_type),
.@"c_ulonglong" => return Value.initTag(.c_ulonglong_type),
.@"c_longdouble" => return Value.initTag(.c_longdouble_type),
.@"c_void" => return Value.initTag(.c_void_type),
.@"f16" => return Value.initTag(.f16_type),
.@"f32" => return Value.initTag(.f32_type),
.@"f64" => return Value.initTag(.f64_type),
.@"f128" => return Value.initTag(.f128_type),
.@"bool" => return Value.initTag(.bool_type),
.@"void" => return Value.initTag(.void_type),
.@"type" => return Value.initTag(.type_type),
.@"anyerror" => return Value.initTag(.anyerror_type),
.@"comptime_int" => return Value.initTag(.comptime_int_type),
.@"comptime_float" => return Value.initTag(.comptime_float_type),
.@"noreturn" => return Value.initTag(.noreturn_type),
.u8 => return Value.initTag(.u8_type),
.i8 => return Value.initTag(.i8_type),
.isize => return Value.initTag(.isize_type),
.usize => return Value.initTag(.usize_type),
.c_short => return Value.initTag(.c_short_type),
.c_ushort => return Value.initTag(.c_ushort_type),
.c_int => return Value.initTag(.c_int_type),
.c_uint => return Value.initTag(.c_uint_type),
.c_long => return Value.initTag(.c_long_type),
.c_ulong => return Value.initTag(.c_ulong_type),
.c_longlong => return Value.initTag(.c_longlong_type),
.c_ulonglong => return Value.initTag(.c_ulonglong_type),
.c_longdouble => return Value.initTag(.c_longdouble_type),
.c_void => return Value.initTag(.c_void_type),
.f16 => return Value.initTag(.f16_type),
.f32 => return Value.initTag(.f32_type),
.f64 => return Value.initTag(.f64_type),
.f128 => return Value.initTag(.f128_type),
.bool => return Value.initTag(.bool_type),
.void => return Value.initTag(.void_type),
.type => return Value.initTag(.type_type),
.anyerror => return Value.initTag(.anyerror_type),
.comptime_int => return Value.initTag(.comptime_int_type),
.comptime_float => return Value.initTag(.comptime_float_type),
.noreturn => return Value.initTag(.noreturn_type),
.fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type),
.single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type),
.const_slice_u8 => return Value.initTag(.const_slice_u8_type),
@@ -245,35 +255,37 @@ pub const Type = extern union {
pub fn isSinglePointer(self: Type) bool {
return switch (self.tag()) {
.@"u8",
.@"i8",
.@"isize",
.@"usize",
.@"c_short",
.@"c_ushort",
.@"c_int",
.@"c_uint",
.@"c_long",
.@"c_ulong",
.@"c_longlong",
.@"c_ulonglong",
.@"c_longdouble",
.@"f16",
.@"f32",
.@"f64",
.@"f128",
.@"c_void",
.@"bool",
.@"void",
.@"type",
.@"anyerror",
.@"comptime_int",
.@"comptime_float",
.@"noreturn",
.u8,
.i8,
.isize,
.usize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.c_void,
.bool,
.void,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.noreturn,
.array,
.array_u8_sentinel_0,
.const_slice_u8,
.fn_naked_noreturn_no_args,
.int_unsigned,
.int_signed,
=> false,
.single_const_pointer,
@@ -284,36 +296,38 @@ pub const Type = extern union {
pub fn isSlice(self: Type) bool {
return switch (self.tag()) {
.@"u8",
.@"i8",
.@"isize",
.@"usize",
.@"c_short",
.@"c_ushort",
.@"c_int",
.@"c_uint",
.@"c_long",
.@"c_ulong",
.@"c_longlong",
.@"c_ulonglong",
.@"c_longdouble",
.@"f16",
.@"f32",
.@"f64",
.@"f128",
.@"c_void",
.@"bool",
.@"void",
.@"type",
.@"anyerror",
.@"comptime_int",
.@"comptime_float",
.@"noreturn",
.u8,
.i8,
.isize,
.usize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.c_void,
.bool,
.void,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.noreturn,
.array,
.array_u8_sentinel_0,
.single_const_pointer,
.single_const_pointer_to_comptime_int,
.fn_naked_noreturn_no_args,
.int_unsigned,
.int_signed,
=> false,
.const_slice_u8 => true,
@@ -323,34 +337,36 @@ pub const Type = extern union {
/// Asserts the type is a pointer type.
pub fn pointerIsConst(self: Type) bool {
return switch (self.tag()) {
.@"u8",
.@"i8",
.@"isize",
.@"usize",
.@"c_short",
.@"c_ushort",
.@"c_int",
.@"c_uint",
.@"c_long",
.@"c_ulong",
.@"c_longlong",
.@"c_ulonglong",
.@"c_longdouble",
.@"f16",
.@"f32",
.@"f64",
.@"f128",
.@"c_void",
.@"bool",
.@"void",
.@"type",
.@"anyerror",
.@"comptime_int",
.@"comptime_float",
.@"noreturn",
.u8,
.i8,
.isize,
.usize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.c_void,
.bool,
.void,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.noreturn,
.array,
.array_u8_sentinel_0,
.fn_naked_noreturn_no_args,
.int_unsigned,
.int_signed,
=> unreachable,
.single_const_pointer,
@@ -363,32 +379,34 @@ pub const Type = extern union {
/// Asserts the type is a pointer or array type.
pub fn elemType(self: Type) Type {
return switch (self.tag()) {
.@"u8",
.@"i8",
.@"isize",
.@"usize",
.@"c_short",
.@"c_ushort",
.@"c_int",
.@"c_uint",
.@"c_long",
.@"c_ulong",
.@"c_longlong",
.@"c_ulonglong",
.@"c_longdouble",
.@"f16",
.@"f32",
.@"f64",
.@"f128",
.@"c_void",
.@"bool",
.@"void",
.@"type",
.@"anyerror",
.@"comptime_int",
.@"comptime_float",
.@"noreturn",
.u8,
.i8,
.isize,
.usize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.c_void,
.bool,
.void,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.noreturn,
.fn_naked_noreturn_no_args,
.int_unsigned,
.int_signed,
=> unreachable,
.array => self.cast(Payload.Array).?.elem_type,
@@ -398,7 +416,7 @@ pub const Type = extern union {
};
}
/// Asserts the type is an array.
/// Asserts the type is an array or vector.
pub fn arrayLen(self: Type) u64 {
return switch (self.tag()) {
.u8,
@@ -430,6 +448,8 @@ pub const Type = extern union {
.single_const_pointer,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.int_unsigned,
.int_signed,
=> unreachable,
.array => self.cast(Payload.Array).?.len,
@@ -437,22 +457,64 @@ pub const Type = extern union {
};
}
/// Returns true if and only if the type is a fixed-width, signed integer.
/// Note that `comptime_int` is not fixed-width and therefore returns false.
pub fn isSignedInt(self: Type) bool {
    return switch (self.tag()) {
        // Non-integer types and all unsigned integer types.
        .f16,
        .f32,
        .f64,
        .f128,
        .c_longdouble,
        .c_void,
        .bool,
        .void,
        .type,
        .anyerror,
        .comptime_int,
        .comptime_float,
        .noreturn,
        .fn_naked_noreturn_no_args,
        .array,
        .single_const_pointer,
        .single_const_pointer_to_comptime_int,
        .array_u8_sentinel_0,
        .const_slice_u8,
        .int_unsigned,
        .u8,
        .usize,
        .c_ushort,
        .c_uint,
        .c_ulong,
        .c_ulonglong,
        => false,

        // Signed fixed-width integer types (iN payloads and C signed ints).
        .int_signed,
        .i8,
        .isize,
        .c_short,
        .c_int,
        .c_long,
        .c_longlong,
        => true,
    };
}
/// Asserts the type is a fixed-width integer.
pub fn intInfo(self: Type, target: Target) struct { signed: bool, bits: u16 } {
return switch (self.tag()) {
.@"f16",
.@"f32",
.@"f64",
.@"f128",
.@"c_longdouble",
.@"c_void",
.@"bool",
.@"void",
.@"type",
.@"anyerror",
.@"comptime_int",
.@"comptime_float",
.@"noreturn",
.f16,
.f32,
.f64,
.f128,
.c_longdouble,
.c_void,
.bool,
.void,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.noreturn,
.fn_naked_noreturn_no_args,
.array,
.single_const_pointer,
@@ -461,18 +523,46 @@ pub const Type = extern union {
.const_slice_u8,
=> unreachable,
.@"u8" => .{ .signed = false, .bits = 8 },
.@"i8" => .{ .signed = true, .bits = 8 },
.@"usize" => .{ .signed = false, .bits = target.cpu.arch.ptrBitWidth() },
.@"isize" => .{ .signed = true, .bits = target.cpu.arch.ptrBitWidth() },
.@"c_short" => .{ .signed = true, .bits = CInteger.short.sizeInBits(target) },
.@"c_ushort" => .{ .signed = false, .bits = CInteger.ushort.sizeInBits(target) },
.@"c_int" => .{ .signed = true, .bits = CInteger.int.sizeInBits(target) },
.@"c_uint" => .{ .signed = false, .bits = CInteger.uint.sizeInBits(target) },
.@"c_long" => .{ .signed = true, .bits = CInteger.long.sizeInBits(target) },
.@"c_ulong" => .{ .signed = false, .bits = CInteger.ulong.sizeInBits(target) },
.@"c_longlong" => .{ .signed = true, .bits = CInteger.longlong.sizeInBits(target) },
.@"c_ulonglong" => .{ .signed = false, .bits = CInteger.ulonglong.sizeInBits(target) },
.int_unsigned => .{ .signed = false, .bits = self.cast(Payload.IntUnsigned).?.bits },
.int_signed => .{ .signed = true, .bits = self.cast(Payload.IntSigned).?.bits },
.u8 => .{ .signed = false, .bits = 8 },
.i8 => .{ .signed = true, .bits = 8 },
.usize => .{ .signed = false, .bits = target.cpu.arch.ptrBitWidth() },
.isize => .{ .signed = true, .bits = target.cpu.arch.ptrBitWidth() },
.c_short => .{ .signed = true, .bits = CType.short.sizeInBits(target) },
.c_ushort => .{ .signed = false, .bits = CType.ushort.sizeInBits(target) },
.c_int => .{ .signed = true, .bits = CType.int.sizeInBits(target) },
.c_uint => .{ .signed = false, .bits = CType.uint.sizeInBits(target) },
.c_long => .{ .signed = true, .bits = CType.long.sizeInBits(target) },
.c_ulong => .{ .signed = false, .bits = CType.ulong.sizeInBits(target) },
.c_longlong => .{ .signed = true, .bits = CType.longlong.sizeInBits(target) },
.c_ulonglong => .{ .signed = false, .bits = CType.ulonglong.sizeInBits(target) },
};
}
/// Returns true for the fixed-width floating point types, including
/// `c_longdouble`. `comptime_float` deliberately returns false here.
pub fn isFloat(self: Type) bool {
    return switch (self.tag()) {
        .f16,
        .f32,
        .f64,
        .f128,
        .c_longdouble,
        => true,

        else => false,
    };
}
/// Asserts the type is a fixed-size float.
/// Returns the bit width; `c_longdouble` is resolved through the target's
/// C ABI via `CType`.
pub fn floatBits(self: Type, target: Target) u16 {
    return switch (self.tag()) {
        .f16 => 16,
        .f32 => 32,
        .f64 => 64,
        .f128 => 128,
        .c_longdouble => CType.longdouble.sizeInBits(target),

        else => unreachable,
    };
}
@@ -511,6 +601,8 @@ pub const Type = extern union {
.c_ulong,
.c_longlong,
.c_ulonglong,
.int_unsigned,
.int_signed,
=> unreachable,
};
}
@@ -551,6 +643,8 @@ pub const Type = extern union {
.c_ulong,
.c_longlong,
.c_ulonglong,
.int_unsigned,
.int_signed,
=> unreachable,
}
}
@@ -590,6 +684,8 @@ pub const Type = extern union {
.c_ulong,
.c_longlong,
.c_ulonglong,
.int_unsigned,
.int_signed,
=> unreachable,
};
}
@@ -629,10 +725,145 @@ pub const Type = extern union {
.c_ulong,
.c_longlong,
.c_ulonglong,
.int_unsigned,
.int_signed,
=> unreachable,
};
}
/// Returns true for all integer types (fixed-width, C integer types,
/// and arbitrary-width `int_signed`/`int_unsigned`), all float types,
/// `comptime_int`, and `comptime_float`.
pub fn isNumeric(self: Type) bool {
    return switch (self.tag()) {
        // Non-numeric types first.
        .c_void,
        .bool,
        .void,
        .type,
        .anyerror,
        .noreturn,
        .fn_naked_noreturn_no_args,
        .array,
        .single_const_pointer,
        .single_const_pointer_to_comptime_int,
        .array_u8_sentinel_0,
        .const_slice_u8,
        => false,

        // Floats, comptime numbers, and every integer flavor.
        .f16,
        .f32,
        .f64,
        .f128,
        .c_longdouble,
        .comptime_int,
        .comptime_float,
        .u8,
        .i8,
        .usize,
        .isize,
        .c_short,
        .c_ushort,
        .c_int,
        .c_uint,
        .c_long,
        .c_ulong,
        .c_longlong,
        .c_ulonglong,
        .int_unsigned,
        .int_signed,
        => true,
    };
}
/// Returns true if the type has exactly one possible value.
/// Loops instead of recursing: a nonzero-length array is reduced to its
/// element type, and a single-const-pointer to its pointee type.
pub fn onePossibleValue(self: Type) bool {
    var ty = self;
    while (true) switch (ty.tag()) {
        .f16,
        .f32,
        .f64,
        .f128,
        .c_longdouble,
        .comptime_int,
        .comptime_float,
        .u8,
        .i8,
        .usize,
        .isize,
        .c_short,
        .c_ushort,
        .c_int,
        .c_uint,
        .c_long,
        .c_ulong,
        .c_longlong,
        .c_ulonglong,
        .bool,
        .type,
        .anyerror,
        .fn_naked_noreturn_no_args,
        .single_const_pointer_to_comptime_int,
        .array_u8_sentinel_0,
        .const_slice_u8,
        => return false,

        .c_void,
        .void,
        .noreturn,
        => return true,

        // A 0-bit integer can only hold the value zero.
        .int_unsigned => return ty.cast(Payload.IntUnsigned).?.bits == 0,
        .int_signed => return ty.cast(Payload.IntSigned).?.bits == 0,

        .array => {
            const array = ty.cast(Payload.Array).?;
            // An empty array has exactly one value regardless of element type.
            if (array.len == 0)
                return true;
            // Otherwise the answer is determined by the element type.
            ty = array.elem_type;
            continue;
        },
        .single_const_pointer => {
            // Follow the pointee type and re-check.
            const ptr = ty.cast(Payload.SingleConstPointer).?;
            ty = ptr.pointee_type;
            continue;
        },
    };
}
/// Returns whether this type is a C pointer. No tag currently models a
/// C pointer, so this is false for every representable type; the switch
/// is kept exhaustive so adding a tag forces this function to be updated.
pub fn isCPtr(self: Type) bool {
    switch (self.tag()) {
        .f16,
        .f32,
        .f64,
        .f128,
        .c_longdouble,
        .comptime_int,
        .comptime_float,
        .u8,
        .i8,
        .usize,
        .isize,
        .c_short,
        .c_ushort,
        .c_int,
        .c_uint,
        .c_long,
        .c_ulong,
        .c_longlong,
        .c_ulonglong,
        .bool,
        .type,
        .anyerror,
        .fn_naked_noreturn_no_args,
        .single_const_pointer_to_comptime_int,
        .array_u8_sentinel_0,
        .const_slice_u8,
        .c_void,
        .void,
        .noreturn,
        .int_unsigned,
        .int_signed,
        .array,
        .single_const_pointer,
        => return false,
    }
}
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
@@ -674,6 +905,8 @@ pub const Type = extern union {
array_u8_sentinel_0,
array,
single_const_pointer,
int_signed,
int_unsigned,
pub const last_no_payload_tag = Tag.const_slice_u8;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -700,10 +933,22 @@ pub const Type = extern union {
pointee_type: Type,
};
/// Payload for the `int_signed` tag: an arbitrary-bit-width signed integer type.
pub const IntSigned = struct {
    base: Payload = Payload{ .tag = .int_signed },
    // Bit width; 0 is allowed (a 0-bit integer has only one possible value).
    bits: u16,
};
/// Payload for the `int_unsigned` tag: an arbitrary-bit-width unsigned integer type.
pub const IntUnsigned = struct {
    base: Payload = Payload{ .tag = .int_unsigned },
    // Bit width; 0 is allowed (a 0-bit integer has only one possible value).
    bits: u16,
};
};
};
pub const CInteger = enum {
pub const CType = enum {
short,
ushort,
int,
@@ -712,8 +957,9 @@ pub const CInteger = enum {
ulong,
longlong,
ulonglong,
longdouble,
pub fn sizeInBits(self: CInteger, target: Target) u16 {
pub fn sizeInBits(self: CType, target: Target) u16 {
const arch = target.cpu.arch;
switch (target.os.tag) {
.freestanding, .other => switch (target.cpu.arch) {
@@ -729,6 +975,7 @@ pub const CInteger = enum {
.longlong,
.ulonglong,
=> return 64,
.longdouble => @panic("TODO figure out what kind of float `long double` is on this target"),
},
else => switch (self) {
.short,
@@ -743,6 +990,7 @@ pub const CInteger = enum {
.longlong,
.ulonglong,
=> return 64,
.longdouble => @panic("TODO figure out what kind of float `long double` is on this target"),
},
},
@@ -767,6 +1015,7 @@ pub const CInteger = enum {
.longlong,
.ulonglong,
=> return 64,
.longdouble => @panic("TODO figure out what kind of float `long double` is on this target"),
},
.windows, .uefi => switch (self) {
@@ -781,6 +1030,7 @@ pub const CInteger = enum {
.longlong,
.ulonglong,
=> return 64,
.longdouble => @panic("TODO figure out what kind of float `long double` is on this target"),
},
.ios => switch (self) {
@@ -795,6 +1045,7 @@ pub const CInteger = enum {
.longlong,
.ulonglong,
=> return 64,
.longdouble => @panic("TODO figure out what kind of float `long double` is on this target"),
},
.ananas,
@@ -821,7 +1072,7 @@ pub const CInteger = enum {
.amdpal,
.hermit,
.hurd,
=> @panic("TODO specify the C integer type sizes for this OS"),
=> @panic("TODO specify the C integer and float type sizes for this OS"),
}
}
};

View File

@@ -48,9 +48,10 @@ pub const Value = extern union {
single_const_pointer_to_comptime_int_type,
const_slice_u8_type,
undef,
zero,
void_value,
noreturn_value,
the_one_possible_value, // when the type only has one possible value
null_value,
bool_true,
bool_false, // See last_no_payload_tag below.
// After this, the tag requires a payload.
@@ -63,6 +64,7 @@ pub const Value = extern union {
ref,
ref_val,
bytes,
repeated, // the value is a value repeated some number of times
pub const last_no_payload_tag = Tag.bool_false;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -135,9 +137,10 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
.const_slice_u8_type => return out_stream.writeAll("[]const u8"),
.null_value => return out_stream.writeAll("null"),
.undef => return out_stream.writeAll("undefined"),
.zero => return out_stream.writeAll("0"),
.void_value => return out_stream.writeAll("{}"),
.noreturn_value => return out_stream.writeAll("unreachable"),
.the_one_possible_value => return out_stream.writeAll("(one possible value)"),
.bool_true => return out_stream.writeAll("true"),
.bool_false => return out_stream.writeAll("false"),
.ty => return val.cast(Payload.Ty).?.ty.format("", options, out_stream),
@@ -152,6 +155,10 @@ pub const Value = extern union {
continue;
},
.bytes => return std.zig.renderStringLiteral(self.cast(Payload.Bytes).?.data, out_stream),
.repeated => {
try out_stream.writeAll("(repeated) ");
val = val.cast(Payload.Repeated).?.val;
},
};
}
@@ -198,11 +205,12 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
.const_slice_u8_type => Type.initTag(.const_slice_u8),
.undef,
.zero,
.void_value,
.noreturn_value,
.the_one_possible_value,
.bool_true,
.bool_false,
.null_value,
.int_u64,
.int_i64,
.int_big,
@@ -210,12 +218,13 @@ pub const Value = extern union {
.ref,
.ref_val,
.bytes,
.repeated,
=> unreachable,
};
}
/// Asserts the value is an integer.
pub fn toBigInt(self: Value, allocator: *Allocator) Allocator.Error!BigInt {
pub fn toBigInt(self: Value, space: *BigIntSpace) BigInt {
switch (self.tag()) {
.ty,
.u8_type,
@@ -246,20 +255,23 @@ pub const Value = extern union {
.fn_naked_noreturn_no_args_type,
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.void_value,
.noreturn_value,
.bool_true,
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.bytes,
.undef,
.repeated,
=> unreachable,
.zero => return BigInt.initSet(allocator, 0),
.the_one_possible_value, // An integer with one possible value is always zero.
.zero,
=> return BigInt.initSetFixed(&space.limbs, 0),
.int_u64 => return BigInt.initSet(allocator, self.cast(Payload.Int_u64).?.int),
.int_i64 => return BigInt.initSet(allocator, self.cast(Payload.Int_i64).?.int),
.int_u64 => return BigInt.initSetFixed(&space.limbs, self.cast(Payload.Int_u64).?.int),
.int_i64 => return BigInt.initSetFixed(&space.limbs, self.cast(Payload.Int_i64).?.int),
.int_big => return self.cast(Payload.IntBig).?.big_int,
}
}
@@ -296,17 +308,20 @@ pub const Value = extern union {
.fn_naked_noreturn_no_args_type,
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.void_value,
.noreturn_value,
.bool_true,
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.bytes,
.undef,
.repeated,
=> unreachable,
.zero => return 0,
.zero,
.the_one_possible_value, // an integer with one possible value is always zero
=> return 0,
.int_u64 => return self.cast(Payload.Int_u64).?.int,
.int_i64 => return @intCast(u64, self.cast(Payload.Int_u64).?.int),
@@ -314,6 +329,66 @@ pub const Value = extern union {
}
}
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to represent stored in twos complement form.
pub fn intBitCountTwosComp(self: Value) usize {
    switch (self.tag()) {
        .ty,
        .u8_type,
        .i8_type,
        .isize_type,
        .usize_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f128_type,
        .c_void_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .fn_naked_noreturn_no_args_type,
        .single_const_pointer_to_comptime_int_type,
        .const_slice_u8_type,
        .bool_true,
        .bool_false,
        .null_value,
        .function,
        .ref,
        .ref_val,
        .bytes,
        .undef,
        .repeated,
        => unreachable,

        .the_one_possible_value, // an integer with one possible value is always zero
        .zero,
        => return 0,

        .int_u64 => {
            const x = self.cast(Payload.Int_u64).?.int;
            if (x == 0) return 0;
            return std.math.log2(x) + 1;
        },
        .int_i64 => {
            const x = self.cast(Payload.Int_i64).?.int;
            if (x >= 0) {
                // Non-negative values need the same bit count as the unsigned case.
                if (x == 0) return 0;
                return std.math.log2(@intCast(u64, x)) + 1;
            }
            // Negative: find the smallest N such that -2^(N-1) <= x.
            // Compute |x| without overflowing at minInt(i64).
            const magnitude = @intCast(u64, -(x + 1)) + 1;
            const magnitude_bits = std.math.log2(magnitude) + 1;
            // -2^(n-1) itself fits in n bits; every other negative needs one more.
            if (magnitude & (magnitude - 1) == 0) return magnitude_bits;
            return magnitude_bits + 1;
        },
        .int_big => return self.cast(Payload.IntBig).?.big_int.bitCountTwosComp(),
    }
}
/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
pub fn intFitsInType(self: Value, ty: Type, target: Target) bool {
switch (self.tag()) {
@@ -346,17 +421,20 @@ pub const Value = extern union {
.fn_naked_noreturn_no_args_type,
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.void_value,
.noreturn_value,
.bool_true,
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.bytes,
.repeated,
=> unreachable,
.zero => return true,
.zero,
.undef,
.the_one_possible_value, // an integer with one possible value is always zero
=> return true,
.int_u64 => switch (ty.zigTypeTag()) {
.Int => {
@@ -392,9 +470,148 @@ pub const Value = extern union {
}
}
/// Asserts the value is a float
/// Returns whether the float value has a nonzero fractional part.
/// Currently `.zero` is the only tag that can represent a float value
/// here, and zero has no fraction.
pub fn floatHasFraction(self: Value) bool {
    return switch (self.tag()) {
        .ty,
        .u8_type,
        .i8_type,
        .isize_type,
        .usize_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f128_type,
        .c_void_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .fn_naked_noreturn_no_args_type,
        .single_const_pointer_to_comptime_int_type,
        .const_slice_u8_type,
        .bool_true,
        .bool_false,
        .null_value,
        .function,
        .ref,
        .ref_val,
        .bytes,
        .repeated,
        .undef,
        .int_u64,
        .int_i64,
        .int_big,
        .the_one_possible_value,
        => unreachable,
        .zero => false,
    };
}
/// Compares the value against zero. Asserts the value is an integer
/// and not undefined (all other tags are unreachable).
pub fn orderAgainstZero(lhs: Value) std.math.Order {
    switch (lhs.tag()) {
        .ty,
        .u8_type,
        .i8_type,
        .isize_type,
        .usize_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f128_type,
        .c_void_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .fn_naked_noreturn_no_args_type,
        .single_const_pointer_to_comptime_int_type,
        .const_slice_u8_type,
        .bool_true,
        .bool_false,
        .null_value,
        .function,
        .ref,
        .ref_val,
        .bytes,
        .repeated,
        .undef,
        => unreachable,

        .zero,
        .the_one_possible_value, // an integer with one possible value is always zero
        => return .eq,

        // Fixed-size integers: delegate to std.math.order against 0.
        .int_u64 => return std.math.order(lhs.cast(Payload.Int_u64).?.int, 0),
        .int_i64 => return std.math.order(lhs.cast(Payload.Int_i64).?.int, 0),
        // Big integers carry their own comparison against a scalar.
        .int_big => return lhs.cast(Payload.IntBig).?.big_int.orderAgainstScalar(0),
    }
}
/// Asserts the value is comparable.
/// Returns the ordering of `lhs` relative to `rhs`.
pub fn order(lhs: Value, rhs: Value) std.math.Order {
    const lhs_tag = lhs.tag();
    // BUG FIX: this was `lhs.tag()`, so `rhs_is_zero` tracked the wrong
    // operand and the zero fast paths misfired.
    const rhs_tag = rhs.tag();
    const lhs_is_zero = lhs_tag == .zero or lhs_tag == .the_one_possible_value;
    const rhs_is_zero = rhs_tag == .zero or rhs_tag == .the_one_possible_value;
    if (lhs_is_zero) {
        // 0 <=> rhs is the inverse of rhs <=> 0. Inverted inline rather than
        // via Order.invert, which currently maps .gt to .gt (itself a bug).
        return switch (rhs.orderAgainstZero()) {
            .lt => .gt,
            .eq => .eq,
            .gt => .lt,
        };
    }
    if (rhs_is_zero) return lhs.orderAgainstZero();

    // TODO floats
    // Both operands are nonzero integers: compare as big integers using
    // stack-backed limb storage (no allocator needed).
    var lhs_bigint_space: BigIntSpace = undefined;
    var rhs_bigint_space: BigIntSpace = undefined;
    const lhs_bigint = lhs.toBigInt(&lhs_bigint_space);
    const rhs_bigint = rhs.toBigInt(&rhs_bigint_space);
    return BigInt.cmp(lhs_bigint, rhs_bigint);
}
/// Asserts the value is comparable.
/// Evaluates `lhs <op> rhs` by ordering the two values.
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool {
    const result_order = order(lhs, rhs);
    return result_order.compare(op);
}
/// Asserts the value is comparable.
/// Evaluates `lhs <op> 0` by ordering the value against zero.
pub fn compareWithZero(lhs: Value, op: std.math.CompareOperator) bool {
    const zero_order = orderAgainstZero(lhs);
    return zero_order.compare(op);
}
/// Asserts the value is a boolean and converts it to a Zig `bool`.
pub fn toBool(self: Value) bool {
    switch (self.tag()) {
        .bool_true => return true,
        .bool_false => return false,
        else => unreachable,
    }
}
/// Asserts the value is a pointer and dereferences it.
pub fn pointerDeref(self: Value) Value {
switch (self.tag()) {
return switch (self.tag()) {
.ty,
.u8_type,
.i8_type,
@@ -425,20 +642,22 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.zero,
.void_value,
.noreturn_value,
.bool_true,
.bool_false,
.null_value,
.function,
.int_u64,
.int_i64,
.int_big,
.bytes,
.undef,
.repeated,
=> unreachable,
.ref => return self.cast(Payload.Ref).?.cell.contents,
.ref_val => return self.cast(Payload.RefVal).?.val,
}
.the_one_possible_value => Value.initTag(.the_one_possible_value),
.ref => self.cast(Payload.Ref).?.cell.contents,
.ref_val => self.cast(Payload.RefVal).?.val,
};
}
/// Asserts the value is a single-item pointer to an array, or an array,
@@ -475,14 +694,15 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.zero,
.void_value,
.noreturn_value,
.the_one_possible_value,
.bool_true,
.bool_false,
.null_value,
.function,
.int_u64,
.int_i64,
.int_big,
.undef,
=> unreachable,
.ref => @panic("TODO figure out how MemoryCell works"),
@@ -493,9 +713,68 @@ pub const Value = extern union {
int_payload.* = .{ .int = self.cast(Payload.Bytes).?.data[index] };
return Value.initPayload(&int_payload.base);
},
// No matter the index; all the elements are the same!
.repeated => return self.cast(Payload.Repeated).?.val,
}
}
/// Returns true if and only if the value is `undefined`.
pub fn isUndef(self: Value) bool {
    return switch (self.tag()) {
        .undef => true,
        else => false,
    };
}
/// Valid for all types. Asserts the value is not undefined.
/// `.the_one_possible_value` is reported as not null.
/// Only `.null_value` is considered null; every other tag is a concrete value.
pub fn isNull(self: Value) bool {
    return switch (self.tag()) {
        .ty,
        .u8_type,
        .i8_type,
        .isize_type,
        .usize_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f128_type,
        .c_void_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .fn_naked_noreturn_no_args_type,
        .single_const_pointer_to_comptime_int_type,
        .const_slice_u8_type,
        .zero,
        .the_one_possible_value,
        .bool_true,
        .bool_false,
        .function,
        .int_u64,
        .int_i64,
        .int_big,
        .ref,
        .ref_val,
        .bytes,
        .repeated,
        => false,

        // Undefined must be handled by the caller before asking about null-ness.
        .undef => unreachable,
        .null_value => true,
    };
}
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,
@@ -550,6 +829,20 @@ pub const Value = extern union {
base: Payload = Payload{ .tag = .ty },
ty: Type,
};
/// Payload for the `repeated` tag: a single value repeated some number
/// of times, e.g. for array values whose elements are all the same.
pub const Repeated = struct {
    // BUG FIX: the default tag was `.ty`, copy-pasted from the Ty payload
    // above. `cast(Payload.Repeated)` and the `.repeated` switch arms
    // (e.g. in `format`) require the tag to be `.repeated`.
    base: Payload = Payload{ .tag = .repeated },
    /// This value is repeated some number of times. The amount of times to repeat
    /// is stored externally.
    val: Value,
};
};
/// Big enough to fit any non-BigInt value
/// Stack-allocated limb storage handed to `toBigInt` so that fixed-size
/// integer values can be viewed as big integers without an allocator.
pub const BigIntSpace = struct {
    /// The +1 is headroom so that operations such as incrementing once or decrementing once
    /// are possible without using an allocator.
    limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
};
};