compiler: improve "... contains reference to comptime var" errors

`Sema.explainWhyValueContainsReferenceToComptimeVar` (concise name!)
adds notes to an error explaining how to get from a given `Value` to a
pointer to some `comptime var` (or a comptime field). Previously, this
error could be very opaque in any case where it wasn't obvious where the
comptime var pointer came from; particularly for type captures. Now, the
error notes explain this to the user.
This commit is contained in:
mlugg
2025-01-05 11:28:16 +00:00
committed by Matthew Lugg
parent 6cfc9c0e02
commit 04c9f50aec
12 changed files with 567 additions and 228 deletions

View File

@@ -5315,8 +5315,9 @@ fn structDeclInner(
const fields_slice = wip_members.fieldsSlice();
const bodies_slice = astgen.scratch.items[bodies_start..];
try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len + 2 +
decls_slice.len + namespace.captures.count() + fields_slice.len + bodies_slice.len);
decls_slice.len + namespace.captures.count() * 2 + fields_slice.len + bodies_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.values()));
if (backing_int_ref != .none) {
astgen.extra.appendAssumeCapacity(@intCast(backing_int_body_len));
if (backing_int_body_len == 0) {
@@ -5595,8 +5596,9 @@ fn unionDeclInner(
wip_members.finishBits(bits_per_field);
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len + body_len + fields_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() * 2 + decls_slice.len + body_len + fields_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.values()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.appendBodyWithFixups(body);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
@@ -5855,8 +5857,9 @@ fn containerDecl(
wip_members.finishBits(bits_per_field);
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len + body_len + fields_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() * 2 + decls_slice.len + body_len + fields_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.values()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.appendBodyWithFixups(body);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
@@ -5910,8 +5913,9 @@ fn containerDecl(
wip_members.finishBits(0);
const decls_slice = wip_members.declsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() * 2 + decls_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.values()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
block_scope.unstack();
@@ -8548,6 +8552,7 @@ fn localVarRef(
num_namespaces_out,
.{ .ref = local_val.inst },
.{ .token = local_val.token_src },
name_str_index,
) else local_val.inst;
return rvalueNoCoercePreRef(gz, ri, value_inst, ident);
@@ -8580,6 +8585,7 @@ fn localVarRef(
num_namespaces_out,
.{ .ref = local_ptr.ptr },
.{ .token = local_ptr.token_src },
name_str_index,
) else local_ptr.ptr;
local_ptr.used_as_lvalue = true;
return ptr_inst;
@@ -8591,6 +8597,7 @@ fn localVarRef(
num_namespaces_out,
.{ .ref_load = local_ptr.ptr },
.{ .token = local_ptr.token_src },
name_str_index,
) else try gz.addUnNode(.load, local_ptr.ptr, ident);
return rvalueNoCoercePreRef(gz, ri, val_inst, ident);
},
@@ -8636,6 +8643,7 @@ fn localVarRef(
found_namespaces_out,
.{ .decl_ref = name_str_index },
.{ .node = found_already.? },
name_str_index,
),
else => {
const result = try tunnelThroughClosure(
@@ -8644,6 +8652,7 @@ fn localVarRef(
found_namespaces_out,
.{ .decl_val = name_str_index },
.{ .node = found_already.? },
name_str_index,
);
return rvalueNoCoercePreRef(gz, ri, result, ident);
},
@@ -8680,6 +8689,7 @@ fn tunnelThroughClosure(
token: Ast.TokenIndex,
node: Ast.Node.Index,
},
name_str_index: Zir.NullTerminatedString,
) !Zir.Inst.Ref {
switch (value) {
.ref => |v| if (v.toIndex() == null) return v, // trivial value; do not need tunnel
@@ -8714,34 +8724,43 @@ fn tunnelThroughClosure(
// Now that we know the scopes we're tunneling through, begin adding
// captures as required, starting with the outermost namespace.
const root_capture = Zir.Inst.Capture.wrap(switch (value) {
const root_capture: Zir.Inst.Capture = .wrap(switch (value) {
.ref => |v| .{ .instruction = v.toIndex().? },
.ref_load => |v| .{ .instruction_load = v.toIndex().? },
.decl_val => |str| .{ .decl_val = str },
.decl_ref => |str| .{ .decl_ref = str },
});
var cur_capture_index = std.math.cast(
u16,
(try root_ns.captures.getOrPut(gpa, root_capture)).index,
) orelse return astgen.failNodeNotes(root_ns.node, "this compiler implementation only supports up to 65536 captures per namespace", .{}, &.{
switch (decl_src) {
.token => |t| try astgen.errNoteTok(t, "captured value here", .{}),
.node => |n| try astgen.errNoteNode(n, "captured value here", .{}),
},
try astgen.errNoteNode(inner_ref_node, "value used here", .{}),
});
for (intermediate_tunnels) |tunnel_ns| {
cur_capture_index = std.math.cast(
u16,
(try tunnel_ns.captures.getOrPut(gpa, Zir.Inst.Capture.wrap(.{ .nested = cur_capture_index }))).index,
) orelse return astgen.failNodeNotes(tunnel_ns.node, "this compiler implementation only supports up to 65536 captures per namespace", .{}, &.{
const root_gop = try root_ns.captures.getOrPut(gpa, root_capture);
root_gop.value_ptr.* = name_str_index;
var cur_capture_index = std.math.cast(u16, root_gop.index) orelse return astgen.failNodeNotes(
root_ns.node,
"this compiler implementation only supports up to 65536 captures per namespace",
.{},
&.{
switch (decl_src) {
.token => |t| try astgen.errNoteTok(t, "captured value here", .{}),
.node => |n| try astgen.errNoteNode(n, "captured value here", .{}),
},
try astgen.errNoteNode(inner_ref_node, "value used here", .{}),
});
},
);
for (intermediate_tunnels) |tunnel_ns| {
const tunnel_gop = try tunnel_ns.captures.getOrPut(gpa, .wrap(.{ .nested = cur_capture_index }));
tunnel_gop.value_ptr.* = name_str_index;
cur_capture_index = std.math.cast(u16, tunnel_gop.index) orelse return astgen.failNodeNotes(
tunnel_ns.node,
"this compiler implementation only supports up to 65536 captures per namespace",
.{},
&.{
switch (decl_src) {
.token => |t| try astgen.errNoteTok(t, "captured value here", .{}),
.node => |n| try astgen.errNoteNode(n, "captured value here", .{}),
},
try astgen.errNoteNode(inner_ref_node, "value used here", .{}),
},
);
}
// Incorporate the capture index into the source hash, so that changes in
@@ -11920,7 +11939,7 @@ const Scope = struct {
declaring_gz: ?*GenZir,
/// Set of captures used by this namespace.
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, void) = .empty,
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, Zir.NullTerminatedString) = .empty,
fn deinit(self: *Namespace, gpa: Allocator) void {
self.decls.deinit(gpa);

View File

@@ -3284,24 +3284,25 @@ pub const Inst = struct {
/// 1. fields_len: u32, // if has_fields_len
/// 2. decls_len: u32, // if has_decls_len
/// 3. capture: Capture // for every captures_len
/// 4. backing_int_body_len: u32, // if has_backing_int
/// 5. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 6. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 7. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 8. flags: u32 // for every 8 fields
/// 4. capture_name: NullTerminatedString // for every captures_len
/// 5. backing_int_body_len: u32, // if has_backing_int
/// 6. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 7. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 8. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 9. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
/// 9. fields: { // for every fields_len
/// 10. fields: { // for every fields_len
/// field_name: u32,
/// field_type: Ref, // if corresponding bit is not set. none means anytype.
/// field_type_body_len: u32, // if corresponding bit is set
/// align_body_len: u32, // if corresponding bit is set
/// init_body_len: u32, // if corresponding bit is set
/// }
/// 10. bodies: { // for every fields_len
/// 11. bodies: { // for every fields_len
/// field_type_body_inst: Inst, // for each field_type_body_len
/// align_body_inst: Inst, // for each align_body_len
/// init_body_inst: Inst, // for each init_body_len
@@ -3450,11 +3451,12 @@ pub const Inst = struct {
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. capture: Capture // for every captures_len
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 32 fields
/// 6. capture_name: NullTerminatedString // for every captures_len
/// 7. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 8. inst: Index // for every body_len
/// 9. has_bits: u32 // for every 32 fields
/// - the bit is whether corresponding field has a value expression
/// 9. fields: { // for every fields_len
/// 10. fields: { // for every fields_len
/// field_name: u32,
/// value: Ref, // if corresponding bit is set
/// }
@@ -3488,15 +3490,16 @@ pub const Inst = struct {
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. capture: Capture // for every captures_len
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 8 fields
/// 6. capture_name: NullTerminatedString // for every captures_len
/// 7. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 8. inst: Index // for every body_len
/// 9. has_bits: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has a type expression
/// 0b00X0: whether corresponding field has an align expression
/// 0b0X00: whether corresponding field has a tag value expression
/// 0bX000: unused
/// 9. fields: { // for every fields_len
/// 10. fields: { // for every fields_len
/// field_name: NullTerminatedString, // null terminated string index
/// field_type: Ref, // if corresponding bit is set
/// align: Ref, // if corresponding bit is set
@@ -3537,7 +3540,8 @@ pub const Inst = struct {
/// 0. captures_len: u32, // if has_captures_len
/// 1. decls_len: u32, // if has_decls_len
/// 2. capture: Capture, // for every captures_len
/// 3. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 3. capture_name: NullTerminatedString // for every captures_len
/// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
pub const OpaqueDecl = struct {
src_line: u32,
/// This node provides a new absolute baseline node for all instructions within this struct.
@@ -3852,7 +3856,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
@@ -3887,7 +3891,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
return .{
.extra_index = extra_index,
@@ -3912,7 +3916,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
return .{
.extra_index = extra_index,
@@ -3934,7 +3938,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :captures_len captures_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
return .{
.extra_index = extra_index,
@@ -4349,7 +4353,7 @@ fn findTrackableInner(
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1;
@@ -4441,7 +4445,7 @@ fn findTrackableInner(
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
extra_index += decls_len;
const body = zir.bodySlice(extra_index, body_len);
try zir.findTrackableBody(gpa, contents, defers, body);
@@ -4471,7 +4475,7 @@ fn findTrackableInner(
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
extra_index += decls_len;
const body = zir.bodySlice(extra_index, body_len);
try zir.findTrackableBody(gpa, contents, defers, body);

View File

@@ -134,6 +134,7 @@ const MaybeComptimeAlloc = struct {
const ComptimeAlloc = struct {
val: MutableValue,
is_const: bool,
src: LazySrcLoc,
/// `.none` indicates that the alignment is the natural alignment of `val`.
alignment: Alignment,
/// This is the `runtime_index` at the point of this allocation. If a store
@@ -142,11 +143,13 @@ const ComptimeAlloc = struct {
runtime_index: RuntimeIndex,
};
fn newComptimeAlloc(sema: *Sema, block: *Block, ty: Type, alignment: Alignment) !ComptimeAllocIndex {
/// `src` may be `.unneeded` if `is_const` will be set.
fn newComptimeAlloc(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, alignment: Alignment) !ComptimeAllocIndex {
const idx = sema.comptime_allocs.items.len;
try sema.comptime_allocs.append(sema.gpa, .{
.val = .{ .interned = try sema.pt.intern(.{ .undef = ty.toIntern() }) },
.is_const = false,
.src = src,
.alignment = alignment,
.runtime_index = block.runtime_index,
});
@@ -1308,7 +1311,7 @@ fn analyzeBodyInner(
.shl_exact => try sema.zirShl(block, inst, .shl_exact),
.shl_sat => try sema.zirShl(block, inst, .shl_sat),
.ret_ptr => try sema.zirRetPtr(block),
.ret_ptr => try sema.zirRetPtr(block, inst),
.ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()),
// Instructions that we know to *always* be noreturn based solely on their tag.
@@ -2301,7 +2304,9 @@ pub fn resolveFinalDeclValue(
};
if (val.canMutateComptimeVarState(zcu)) {
return sema.fail(block, src, "global variable contains reference to comptime var", .{});
const ip = &zcu.intern_pool;
const nav = ip.getNav(sema.owner.unwrap().nav_val);
return sema.failWithContainsReferenceToComptimeVar(block, src, nav.name, "global variable", val);
}
return val;
@@ -2757,7 +2762,8 @@ fn zirTupleDecl(
const coerced_field_init = try sema.coerce(block, field_type, uncoerced_field_init, init_src);
const field_init_val = try sema.resolveConstDefinedValue(block, init_src, coerced_field_init, .{ .simple = .tuple_field_default_value });
if (field_init_val.canMutateComptimeVarState(zcu)) {
return sema.fail(block, init_src, "field default value contains reference to comptime-mutable memory", .{});
const field_name = try zcu.intern_pool.getOrPutStringFmt(gpa, pt.tid, "{}", .{field_index}, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, init_src, field_name, "field default value", field_init_val);
}
break :init field_init_val.toIntern();
}
@@ -2813,8 +2819,10 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
const captures = try sema.arena.alloc(InternPool.CaptureValue, captures_len);
for (sema.code.extra[extra_index..][0..captures_len], captures) |raw, *capture| {
for (sema.code.extra[extra_index..][0..captures_len], sema.code.extra[extra_index + captures_len ..][0..captures_len], captures) |raw, raw_name, *capture| {
const zir_capture: Zir.Inst.Capture = @bitCast(raw);
const zir_name: Zir.NullTerminatedString = @enumFromInt(raw_name);
const zir_name_slice = sema.code.nullTerminatedString(zir_name);
capture.* = switch (zir_capture.unwrap()) {
.nested => |parent_idx| parent_captures.get(ip)[parent_idx],
.instruction_load => |ptr_inst| InternPool.CaptureValue.wrap(capture: {
@@ -2828,8 +2836,8 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
};
const loaded_val = try sema.resolveLazyValue(unresolved_loaded_val);
if (loaded_val.canMutateComptimeVarState(zcu)) {
// TODO: source location of captured value
return sema.fail(block, type_src, "type capture contains reference to comptime var", .{});
const field_name = try ip.getOrPutString(zcu.gpa, pt.tid, zir_name_slice, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, type_src, field_name, "captured value", loaded_val);
}
break :capture .{ .@"comptime" = loaded_val.toIntern() };
}),
@@ -2837,8 +2845,8 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
const air_ref = try sema.resolveInst(inst.toRef());
if (try sema.resolveValueResolveLazy(air_ref)) |val| {
if (val.canMutateComptimeVarState(zcu)) {
// TODO: source location of captured value
return sema.fail(block, type_src, "type capture contains reference to comptime var", .{});
const field_name = try ip.getOrPutString(zcu.gpa, pt.tid, zir_name_slice, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, type_src, field_name, "captured value", val);
}
break :capture .{ .@"comptime" = val.toIntern() };
}
@@ -2908,7 +2916,7 @@ fn zirStructDecl(
} else 0;
const captures = try sema.getCaptures(block, src, extra_index, captures_len);
extra_index += captures_len;
extra_index += captures_len * 2;
if (small.has_backing_int) {
const backing_int_body_len = sema.code.extra[extra_index];
@@ -3132,7 +3140,7 @@ fn zirEnumDecl(
} else 0;
const captures = try sema.getCaptures(block, src, extra_index, captures_len);
extra_index += captures_len;
extra_index += captures_len * 2;
const decls = sema.code.bodySlice(extra_index, decls_len);
extra_index += decls_len;
@@ -3275,7 +3283,7 @@ fn zirUnionDecl(
} else 0;
const captures = try sema.getCaptures(block, src, extra_index, captures_len);
extra_index += captures_len;
extra_index += captures_len * 2;
const union_init: InternPool.UnionTypeInit = .{
.flags = .{
@@ -3393,7 +3401,7 @@ fn zirOpaqueDecl(
} else 0;
const captures = try sema.getCaptures(block, src, extra_index, captures_len);
extra_index += captures_len;
extra_index += captures_len * 2;
const opaque_init: InternPool.OpaqueTypeInit = .{
.key = .{ .declared = .{
@@ -3474,15 +3482,17 @@ fn zirErrorSetDecl(
return Air.internedToRef((try pt.errorSetFromUnsortedNames(names.keys())).toIntern());
}
fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
fn zirRetPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const pt = sema.pt;
const src = block.nodeOffset(sema.code.instructions.items(.data)[@intFromEnum(inst)].node);
if (block.isComptime() or try sema.fn_ret_ty.comptimeOnlySema(pt)) {
try sema.fn_ret_ty.resolveFields(pt);
return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
return sema.analyzeComptimeAlloc(block, src, sema.fn_ret_ty, .none);
}
const target = pt.zcu.getTarget();
@@ -3658,6 +3668,7 @@ fn zirAllocExtended(
const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
const ty_src = block.src(.{ .node_offset_var_decl_ty = extra.data.src_node });
const align_src = block.src(.{ .node_offset_var_decl_align = extra.data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = extra.data.src_node });
const small: Zir.Inst.AllocExtended.Small = @bitCast(extended.small);
var extra_index: usize = extra.end;
@@ -3676,7 +3687,7 @@ fn zirAllocExtended(
if (block.isComptime() or small.is_comptime) {
if (small.has_type) {
return sema.analyzeComptimeAlloc(block, var_ty, alignment);
return sema.analyzeComptimeAlloc(block, init_src, var_ty, alignment);
} else {
try sema.air_instructions.append(gpa, .{
.tag = .inferred_alloc_comptime,
@@ -3691,7 +3702,7 @@ fn zirAllocExtended(
}
if (small.has_type and try var_ty.comptimeOnlySema(pt)) {
return sema.analyzeComptimeAlloc(block, var_ty, alignment);
return sema.analyzeComptimeAlloc(block, init_src, var_ty, alignment);
}
if (small.has_type) {
@@ -3741,8 +3752,9 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
return sema.analyzeComptimeAlloc(block, var_ty, .none);
return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
}
fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -3860,7 +3872,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
// The simple strategy failed: we must create a mutable comptime alloc and
// perform all of the runtime store operations at comptime.
const ct_alloc = try sema.newComptimeAlloc(block, elem_ty, ptr_info.flags.alignment);
const ct_alloc = try sema.newComptimeAlloc(block, .unneeded, elem_ty, ptr_info.flags.alignment);
const alloc_ptr = try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
@@ -4073,7 +4085,7 @@ fn finishResolveComptimeKnownAllocPtr(
if (Value.fromInterned(result_val).canMutateComptimeVarState(zcu)) {
const alloc_index = existing_comptime_alloc orelse a: {
const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(zcu));
const idx = try sema.newComptimeAlloc(block, .unneeded, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(zcu));
const alloc = sema.getComptimeAlloc(idx);
alloc.val = .{ .interned = result_val };
break :a idx;
@@ -4139,10 +4151,11 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.isComptime() or try var_ty.comptimeOnlySema(pt)) {
return sema.analyzeComptimeAlloc(block, var_ty, .none);
return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
}
if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
const mut_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
@@ -4168,9 +4181,10 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.isComptime()) {
return sema.analyzeComptimeAlloc(block, var_ty, .none);
return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
}
if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
const var_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
@@ -5726,7 +5740,7 @@ fn storeToInferredAllocComptime(
.byte_offset = 0,
} });
} else {
const alloc_index = try sema.newComptimeAlloc(block, operand_ty, iac.alignment);
const alloc_index = try sema.newComptimeAlloc(block, src, operand_ty, iac.alignment);
sema.getComptimeAlloc(alloc_index).val = .{ .interned = operand_val.toIntern() };
iac.ptr = try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
@@ -31206,14 +31220,7 @@ fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Ins
}
const other_data = sema.air_instructions.items(.data)[@intFromEnum(other_inst)].bin_op;
const other_operand = other_data.rhs;
if (!sema.checkRuntimeValue(other_operand)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(other_src, "runtime value contains reference to comptime var", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(other_src, msg, "comptime var pointers are not available at runtime", .{});
break :msg msg;
});
}
try sema.validateRuntimeValue(block, other_src, other_operand);
}
}
@@ -35328,7 +35335,7 @@ fn backingIntType(
extra_index += @intFromBool(small.has_fields_len);
extra_index += @intFromBool(small.has_decls_len);
extra_index += captures_len;
extra_index += captures_len * 2;
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1;
@@ -35904,7 +35911,7 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
break :decls_len decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
// The backing integer cannot be handled until `resolveStructLayout()`.
if (small.has_backing_int) {
@@ -36233,7 +36240,8 @@ fn structFieldInits(
const default_val = try sema.resolveConstValue(&block_scope, init_src, coerced, null);
if (default_val.canMutateComptimeVarState(zcu)) {
return sema.fail(&block_scope, init_src, "field default value contains reference to comptime-mutable memory", .{});
const field_name = struct_type.fieldName(ip, field_i).unwrap().?;
return sema.failWithContainsReferenceToComptimeVar(&block_scope, init_src, field_name, "field default value", default_val);
}
struct_type.field_inits.get(ip)[field_i] = default_val.toIntern();
}
@@ -36293,7 +36301,7 @@ fn unionFields(
} else 0;
// Skip over captures and decls.
extra_index += captures_len + decls_len;
extra_index += captures_len * 2 + decls_len;
const body = zir.bodySlice(extra_index, body_len);
extra_index += body.len;
@@ -37055,6 +37063,7 @@ fn isComptimeKnown(
fn analyzeComptimeAlloc(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
var_type: Type,
alignment: Alignment,
) CompileError!Air.Inst.Ref {
@@ -37072,7 +37081,7 @@ fn analyzeComptimeAlloc(
},
});
const alloc = try sema.newComptimeAlloc(block, var_type, alignment);
const alloc = try sema.newComptimeAlloc(block, src, var_type, alignment);
return Air.internedToRef((try pt.intern(.{ .ptr = .{
.ty = ptr_type.toIntern(),
@@ -37979,10 +37988,189 @@ fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Ai
const msg = try sema.errMsg(val_src, "runtime value contains reference to comptime var", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(val_src, msg, "comptime var pointers are not available at runtime", .{});
const pt = sema.pt;
const zcu = pt.zcu;
const val_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "runtime_value", .no_embedded_nulls);
try sema.explainWhyValueContainsReferenceToComptimeVar(msg, val_src, val_str, .fromInterned(val.toInterned().?));
break :msg msg;
});
}
/// Reports a "{kind_of_value} contains reference to comptime var" error at `src`.
/// When `val` is provided, explanatory notes are attached tracing how `val`
/// reaches a pointer to a `comptime var` (or a comptime field); `value_name`
/// names the offending value in the first note of that chain.
fn failWithContainsReferenceToComptimeVar(sema: *Sema, block: *Block, src: LazySrcLoc, value_name: InternPool.NullTerminatedString, kind_of_value: []const u8, val: ?Value) CompileError {
    const err_msg = build: {
        const err_msg = try sema.errMsg(src, "{s} contains reference to comptime var", .{kind_of_value});
        // If note generation fails, the partially-built message must be freed.
        errdefer err_msg.destroy(sema.gpa);
        if (val) |resolved| {
            try sema.explainWhyValueContainsReferenceToComptimeVar(err_msg, src, value_name, resolved);
        }
        break :build err_msg;
    };
    return sema.failWithOwnedErrorMsg(block, err_msg);
}
/// Adds notes to `msg` explaining how to get from `val` to a pointer to some
/// `comptime var` (or comptime field). Each step of the explanation is emitted
/// by `notePathToComptimeAllocPtr`; intermediate pointee values are given
/// synthetic names `v0`, `v1`, ... so successive notes can chain together.
fn explainWhyValueContainsReferenceToComptimeVar(sema: *Sema, msg: *Zcu.ErrorMsg, src: LazySrcLoc, value_name: InternPool.NullTerminatedString, val: Value) Allocator.Error!void {
    // Our goal is something like this:
    //   note: '(value.? catch unreachable)[0]' points to 'v0.?.foo'
    //   note: '(v0.?.bar catch unreachable)' points to 'v1'
    //   note: 'v1.?' points to a comptime var
    var intermediate_value_count: u32 = 0;
    var cur_val: Value = val;
    // Iterate until the chain terminates at a comptime var or comptime field;
    // each `.new_val` is the value behind the pointer found in the last step.
    while (true) {
        switch (try sema.notePathToComptimeAllocPtr(msg, src, cur_val, intermediate_value_count, value_name)) {
            .done => return,
            .new_val => |new_val| {
                intermediate_value_count += 1;
                cur_val = new_val;
            },
        }
    }
}
/// Emits one stage of the explanation built by
/// `explainWhyValueContainsReferenceToComptimeVar`: finds the problematic
/// pointer inside `val`, names the access path to it, and adds a note saying
/// what it points to. Returns `.done` when that pointee is the comptime var
/// (or comptime field) itself, or `.new_val` with the pointee value to be
/// explained by the next stage.
fn notePathToComptimeAllocPtr(sema: *Sema, msg: *Zcu.ErrorMsg, src: LazySrcLoc, val: Value, intermediate_value_count: u32, start_value_name: InternPool.NullTerminatedString) Allocator.Error!union(enum) {
    done,
    new_val: Value,
} {
    const arena = sema.arena;
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    // `first_path` starts as the name of the value being explained: the
    // user-visible name on the first stage, `vN-1` on later stages.
    var first_path: std.ArrayListUnmanaged(u8) = .empty;
    if (intermediate_value_count == 0) {
        try first_path.writer(arena).print("{i}", .{start_value_name.fmt(ip)});
    } else {
        try first_path.writer(arena).print("v{}", .{intermediate_value_count - 1});
    }
    // Descend into `val`, appending accesses (".?", "[0]", field names, ...)
    // to `first_path` until we reach the offending pointer value.
    const comptime_ptr = try sema.notePathToComptimeAllocPtrInner(val, &first_path);

    // If the pointer's base address is directly the comptime field or a
    // mutable comptime alloc, one final note suffices.
    switch (ip.indexToKey(comptime_ptr.toIntern()).ptr.base_addr) {
        .comptime_field => {
            try sema.errNote(src, msg, "'{s}' points to comptime field", .{first_path.items});
            return .done;
        },
        .comptime_alloc => |idx| {
            const cta = sema.getComptimeAlloc(idx);
            if (!cta.is_const) {
                try sema.errNote(cta.src, msg, "'{s}' points to comptime var declared here", .{first_path.items});
                return .done;
            }
        },
        else => {}, // there will be another stage
    }

    // Otherwise, render the pointer as an lvalue expression rooted at a new
    // intermediate name `vN`, and recurse into whatever that root holds.
    const derivation = comptime_ptr.pointerDerivationAdvanced(arena, pt, false, sema) catch |err| switch (err) {
        error.OutOfMemory => |e| return e,
        error.AnalysisFail => unreachable,
    };
    var second_path: std.ArrayListUnmanaged(u8) = .empty;
    const inter_name = try std.fmt.allocPrint(arena, "v{d}", .{intermediate_value_count});
    const deriv_start = @import("print_value.zig").printPtrDerivation(
        derivation,
        second_path.writer(arena),
        pt,
        .lvalue,
        .{ .str = inter_name },
        20,
    ) catch |err| switch (err) {
        error.OutOfMemory => |e| return e,
        error.AnalysisFail => unreachable,
        error.GenericPoison => unreachable,
        error.ComptimeReturn => unreachable,
        error.ComptimeBreak => unreachable,
    };
    switch (deriv_start) {
        .int, .nav_ptr => unreachable,
        .uav_ptr => |uav| {
            try sema.errNote(src, msg, "'{s}' points to '{s}', where", .{ first_path.items, second_path.items });
            return .{ .new_val = .fromInterned(uav.val) };
        },
        .comptime_alloc_ptr => |cta_info| {
            try sema.errNote(src, msg, "'{s}' points to '{s}', where", .{ first_path.items, second_path.items });
            const cta = sema.getComptimeAlloc(cta_info.idx);
            if (cta.is_const) {
                // A const alloc is not itself the problem; continue explaining
                // with the value stored in it.
                return .{ .new_val = cta_info.val };
            } else {
                try sema.errNote(cta.src, msg, "'{s}' is a comptime var declared here", .{inter_name});
                return .done;
            }
        },
        .comptime_field_ptr => {
            try sema.errNote(src, msg, "'{s}' points to '{s}', where", .{ first_path.items, second_path.items });
            try sema.errNote(src, msg, "'{s}' is a comptime field", .{inter_name});
            return .done;
        },
        // `printPtrDerivation` renders these access steps into `second_path`;
        // they cannot be the start of a derivation chain.
        .eu_payload_ptr,
        .opt_payload_ptr,
        .field_ptr,
        .elem_ptr,
        .offset_and_cast,
        => unreachable,
    }
}
/// Recursively descends into `val` towards the (unique, by assertion) nested
/// pointer which references comptime-mutable memory, appending the
/// corresponding Zig access syntax to `path` at each step. Returns the
/// pointer value reached. `path` is arena-allocated; caller retains ownership.
fn notePathToComptimeAllocPtrInner(sema: *Sema, val: Value, path: *std.ArrayListUnmanaged(u8)) Allocator.Error!Value {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const arena = sema.arena;
    // Precondition: `val` transitively contains a comptime var reference, so
    // every `unreachable` below is justified by `canMutateComptimeVarState`.
    assert(val.canMutateComptimeVarState(zcu));
    switch (ip.indexToKey(val.toIntern())) {
        // Base case: the pointer itself.
        .ptr => return val,
        .error_union => |eu| {
            // Unwrap as '(path catch unreachable)'; note the '(' is prepended.
            try path.insert(arena, 0, '(');
            try path.appendSlice(arena, " catch unreachable)");
            return sema.notePathToComptimeAllocPtrInner(.fromInterned(eu.val.payload), path);
        },
        .slice => |slice| {
            try path.appendSlice(arena, ".ptr");
            return sema.notePathToComptimeAllocPtrInner(.fromInterned(slice.ptr), path);
        },
        .opt => |opt| {
            try path.appendSlice(arena, ".?");
            return sema.notePathToComptimeAllocPtrInner(.fromInterned(opt.val), path);
        },
        .un => |un| {
            // Name the active union field via its (possibly hypothetical) tag.
            assert(un.tag != .none);
            const union_ty: Type = .fromInterned(un.ty);
            const backing_enum = union_ty.unionTagTypeHypothetical(zcu);
            const field_idx = backing_enum.enumTagFieldIndex(.fromInterned(un.tag), zcu).?;
            const field_name = backing_enum.enumFieldName(field_idx, zcu);
            try path.writer(arena).print(".{i}", .{field_name.fmt(ip)});
            return sema.notePathToComptimeAllocPtrInner(.fromInterned(un.val), path);
        },
        .aggregate => |agg| {
            // Find the first element/field which contains the reference.
            const elem: InternPool.Index, const elem_idx: usize = switch (agg.storage) {
                .bytes => unreachable, // plain bytes cannot hold pointers
                .repeated_elem => |elem| .{ elem, 0 },
                .elems => |elems| for (elems, 0..) |elem, elem_idx| {
                    if (Value.fromInterned(elem).canMutateComptimeVarState(zcu)) {
                        break .{ elem, elem_idx };
                    }
                } else unreachable,
            };
            // Spell the access according to the aggregate's type.
            const agg_ty: Type = .fromInterned(agg.ty);
            switch (agg_ty.zigTypeTag(zcu)) {
                .array, .vector => try path.writer(arena).print("[{d}]", .{elem_idx}),
                // A pointer aggregate here is a slice's (ptr, len) pair.
                .pointer => switch (elem_idx) {
                    Value.slice_ptr_index => try path.appendSlice(arena, ".ptr"),
                    Value.slice_len_index => try path.appendSlice(arena, ".len"),
                    else => unreachable,
                },
                .@"struct" => if (agg_ty.isTuple(zcu)) {
                    try path.writer(arena).print("[{d}]", .{elem_idx});
                } else {
                    const name = agg_ty.structFieldName(elem_idx, zcu).unwrap().?;
                    try path.writer(arena).print(".{i}", .{name.fmt(ip)});
                },
                else => unreachable,
            }
            return sema.notePathToComptimeAllocPtrInner(.fromInterned(elem), path);
        },
        else => unreachable,
    }
}
/// Returns true if any value contained in `val` is undefined.
fn anyUndef(sema: *Sema, block: *Block, src: LazySrcLoc, val: Value) !bool {
const pt = sema.pt;

View File

@@ -4064,6 +4064,7 @@ pub const PointerDeriveStep = union(enum) {
nav_ptr: InternPool.Nav.Index,
uav_ptr: InternPool.Key.Ptr.BaseAddr.Uav,
comptime_alloc_ptr: struct {
idx: InternPool.ComptimeAllocIndex,
val: Value,
ptr_ty: Type,
},
@@ -4110,7 +4111,7 @@ pub const PointerDeriveStep = union(enum) {
};
pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread) Allocator.Error!PointerDeriveStep {
return ptr_val.pointerDerivationAdvanced(arena, pt, false, {}) catch |err| switch (err) {
return ptr_val.pointerDerivationAdvanced(arena, pt, false, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.AnalysisFail => unreachable,
};
@@ -4120,7 +4121,7 @@ pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread) Al
/// only field and element pointers with no casts. This can be used by codegen backends
/// which prefer field/elem accesses when lowering constant pointer values.
/// It is also used by the Value printing logic for pointers.
pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread, comptime have_sema: bool, sema: if (have_sema) *Sema else void) !PointerDeriveStep {
pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread, comptime resolve_types: bool, opt_sema: ?*Sema) !PointerDeriveStep {
const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
const base_derive: PointerDeriveStep = switch (ptr.base_addr) {
@@ -4143,11 +4144,12 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
} };
},
.comptime_alloc => |idx| base: {
if (!have_sema) unreachable;
const sema = opt_sema.?;
const alloc = sema.getComptimeAlloc(idx);
const val = try alloc.val.intern(pt, sema.arena);
const ty = val.typeOf(zcu);
break :base .{ .comptime_alloc_ptr = .{
.idx = idx,
.val = val,
.ptr_ty = try pt.ptrType(.{
.child = ty.toIntern(),
@@ -4162,7 +4164,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
const base_ptr = Value.fromInterned(eu_ptr);
const base_ptr_ty = base_ptr.typeOf(zcu);
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, pt, have_sema, sema);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, pt, resolve_types, opt_sema);
break :base .{ .eu_payload_ptr = .{
.parent = parent_step,
.result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).errorUnionPayload(zcu)),
@@ -4172,7 +4174,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
const base_ptr = Value.fromInterned(opt_ptr);
const base_ptr_ty = base_ptr.typeOf(zcu);
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, pt, have_sema, sema);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, pt, resolve_types, opt_sema);
break :base .{ .opt_payload_ptr = .{
.parent = parent_step,
.result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).optionalChild(zcu)),
@@ -4185,15 +4187,15 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
.@"struct" => .{ agg_ty.fieldType(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
@intCast(field.index),
if (have_sema) .sema else .normal,
if (resolve_types) .sema else .normal,
pt.zcu,
if (have_sema) pt.tid else {},
if (resolve_types) pt.tid else {},
) },
.@"union" => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
@intCast(field.index),
if (have_sema) .sema else .normal,
if (resolve_types) .sema else .normal,
pt.zcu,
if (have_sema) pt.tid else {},
if (resolve_types) pt.tid else {},
) },
.pointer => .{ switch (field.index) {
Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
@@ -4217,7 +4219,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
},
});
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, pt, have_sema, sema);
parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, pt, resolve_types, opt_sema);
break :base .{ .field_ptr = .{
.parent = parent_step,
.field_idx = @intCast(field.index),
@@ -4226,7 +4228,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
},
.arr_elem => |arr_elem| base: {
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, pt, have_sema, sema);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, pt, resolve_types, opt_sema);
const parent_ptr_info = (try parent_step.ptrType(pt)).ptrInfo(zcu);
const result_ptr_ty = try pt.ptrType(.{
.child = parent_ptr_info.child,
@@ -4248,7 +4250,8 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
return base_derive;
}
const need_child = Type.fromInterned(ptr.ty).childType(zcu);
const ptr_ty_info = Type.fromInterned(ptr.ty).ptrInfo(zcu);
const need_child: Type = .fromInterned(ptr_ty_info.child);
if (need_child.comptimeOnly(zcu)) {
// No refinement can happen - this pointer is presumably invalid.
// Just offset it.
@@ -4293,16 +4296,34 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
.frame,
.@"enum",
.vector,
.optional,
.@"union",
=> break,
.optional => {
ptr_opt: {
if (!cur_ty.isPtrLikeOptional(zcu)) break :ptr_opt;
if (need_child.zigTypeTag(zcu) != .pointer) break :ptr_opt;
switch (need_child.ptrSize(zcu)) {
.One, .Many => {},
.Slice, .C => break :ptr_opt,
}
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
cur_derive = .{ .opt_payload_ptr = .{
.parent = parent,
.result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), cur_ty.optionalChild(zcu)),
} };
continue;
}
break;
},
.array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
const start_idx = cur_offset / elem_size;
const end_idx = (cur_offset + need_bytes + elem_size - 1) / elem_size;
if (end_idx == start_idx + 1) {
if (end_idx == start_idx + 1 and ptr_ty_info.flags.size == .One) {
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
cur_derive = .{ .elem_ptr = .{
@@ -4363,7 +4384,22 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
}
};
if (cur_offset == 0 and (try cur_derive.ptrType(pt)).toIntern() == ptr.ty) {
if (cur_offset == 0) compatible: {
const src_ptr_ty_info = (try cur_derive.ptrType(pt)).ptrInfo(zcu);
// We allow silently doing some "coercible" pointer things.
// In particular, we only give up if cv qualifiers are *removed*.
if (src_ptr_ty_info.flags.is_const and !ptr_ty_info.flags.is_const) break :compatible;
if (src_ptr_ty_info.flags.is_volatile and !ptr_ty_info.flags.is_volatile) break :compatible;
if (src_ptr_ty_info.flags.is_allowzero and !ptr_ty_info.flags.is_allowzero) break :compatible;
// Everything else has to match exactly.
if (src_ptr_ty_info.child != ptr_ty_info.child) break :compatible;
if (src_ptr_ty_info.sentinel != ptr_ty_info.sentinel) break :compatible;
if (src_ptr_ty_info.packed_offset != ptr_ty_info.packed_offset) break :compatible;
if (src_ptr_ty_info.flags.size != ptr_ty_info.flags.size) break :compatible;
if (src_ptr_ty_info.flags.alignment != ptr_ty_info.flags.alignment) break :compatible;
if (src_ptr_ty_info.flags.address_space != ptr_ty_info.flags.address_space) break :compatible;
if (src_ptr_ty_info.flags.vector_index != ptr_ty_info.flags.vector_index) break :compatible;
return cur_derive;
}

View File

@@ -3944,7 +3944,7 @@ fn recreateEnumType(
assert(captures_len == key.captures.owned.len); // synchronises with logic in `Zcu.mapOldZirToNew`
extra_index += captures_len;
extra_index += captures_len * 2;
extra_index += decls_len;
const body = zir.bodySlice(extra_index, body_len);
@@ -4071,7 +4071,7 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1; // backing_int_body_len
@@ -4101,7 +4101,7 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
break :decls zir.bodySlice(extra_index, decls_len);
},
.@"enum" => decls: {
@@ -4122,7 +4122,7 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
break :decls zir.bodySlice(extra_index, decls_len);
},
.@"opaque" => decls: {
@@ -4140,7 +4140,7 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += captures_len * 2;
break :decls zir.bodySlice(extra_index, decls_len);
},
};

View File

@@ -29,7 +29,7 @@ pub fn formatSema(
_ = options;
const sema = ctx.opt_sema.?;
comptime std.debug.assert(fmt.len == 0);
return print(ctx.val, writer, ctx.depth, ctx.pt, true, sema) catch |err| switch (err) {
return print(ctx.val, writer, ctx.depth, ctx.pt, sema) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
error.ComptimeBreak, error.ComptimeReturn => unreachable,
error.AnalysisFail => unreachable, // TODO: re-evaluate when we use `sema` more fully
@@ -46,7 +46,7 @@ pub fn format(
_ = options;
std.debug.assert(ctx.opt_sema == null);
comptime std.debug.assert(fmt.len == 0);
return print(ctx.val, writer, ctx.depth, ctx.pt, false, {}) catch |err| switch (err) {
return print(ctx.val, writer, ctx.depth, ctx.pt, null) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
error.ComptimeBreak, error.ComptimeReturn, error.AnalysisFail => unreachable,
else => |e| return e,
@@ -58,9 +58,7 @@ pub fn print(
writer: anytype,
level: u8,
pt: Zcu.PerThread,
/// If this `Sema` is provided, we will recurse through pointers where possible to provide friendly output.
comptime have_sema: bool,
sema: if (have_sema) *Sema else void,
opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Zcu.CompileError)!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@@ -94,11 +92,11 @@ pub fn print(
.func => |func| try writer.print("(function '{}')", .{ip.getNav(func.owner_nav).name.fmt(ip)}),
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (have_sema) {
.lazy_align => |ty| if (opt_sema != null) {
const a = try Type.fromInterned(ty).abiAlignmentSema(pt);
try writer.print("{}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}),
.lazy_size => |ty| if (have_sema) {
.lazy_size => |ty| if (opt_sema != null) {
const s = try Type.fromInterned(ty).abiSizeSema(pt);
try writer.print("{}", .{s});
} else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}),
@@ -110,7 +108,7 @@ pub fn print(
.err_name => |err_name| try writer.print("error.{}", .{
err_name.fmt(ip),
}),
.payload => |payload| try print(Value.fromInterned(payload), writer, level, pt, have_sema, sema),
.payload => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
},
.enum_literal => |enum_literal| try writer.print(".{}", .{
enum_literal.fmt(ip),
@@ -124,7 +122,7 @@ pub fn print(
return writer.writeAll("@enumFromInt(...)");
}
try writer.writeAll("@enumFromInt(");
try print(Value.fromInterned(enum_tag.int), writer, level - 1, pt, have_sema, sema);
try print(Value.fromInterned(enum_tag.int), writer, level - 1, pt, opt_sema);
try writer.writeAll(")");
},
.empty_enum_value => try writer.writeAll("(empty enum value)"),
@@ -141,12 +139,12 @@ pub fn print(
// TODO: eventually we want to load the slice as an array with `sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
try printPtr(Value.fromInterned(slice.ptr), writer, level, pt, have_sema, sema);
try printPtr(Value.fromInterned(slice.ptr), null, writer, level, pt, opt_sema);
try writer.writeAll("[0..");
if (level == 0) {
try writer.writeAll("(...)");
} else {
try print(Value.fromInterned(slice.len), writer, level - 1, pt, have_sema, sema);
try print(Value.fromInterned(slice.len), writer, level - 1, pt, opt_sema);
}
try writer.writeAll("]");
},
@@ -160,13 +158,13 @@ pub fn print(
// TODO: eventually we want to load the pointer with `sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
try printPtr(val, writer, level, pt, have_sema, sema);
try printPtr(val, .rvalue, writer, level, pt, opt_sema);
},
.opt => |opt| switch (opt.val) {
.none => try writer.writeAll("null"),
else => |payload| try print(Value.fromInterned(payload), writer, level, pt, have_sema, sema),
else => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
},
.aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, pt, have_sema, sema),
.aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, pt, opt_sema),
.un => |un| {
if (level == 0) {
try writer.writeAll(".{ ... }");
@@ -175,13 +173,13 @@ pub fn print(
if (un.tag == .none) {
const backing_ty = try val.typeOf(zcu).unionBackingType(pt);
try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(pt)});
try print(Value.fromInterned(un.val), writer, level - 1, pt, have_sema, sema);
try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll("))");
} else {
try writer.writeAll(".{ ");
try print(Value.fromInterned(un.tag), writer, level - 1, pt, have_sema, sema);
try print(Value.fromInterned(un.tag), writer, level - 1, pt, opt_sema);
try writer.writeAll(" = ");
try print(Value.fromInterned(un.val), writer, level - 1, pt, have_sema, sema);
try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll(" }");
}
},
@@ -196,8 +194,7 @@ fn printAggregate(
writer: anytype,
level: u8,
pt: Zcu.PerThread,
comptime have_sema: bool,
sema: if (have_sema) *Sema else void,
opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Zcu.CompileError)!void {
if (level == 0) {
if (is_ref) try writer.writeByte('&');
@@ -218,7 +215,7 @@ fn printAggregate(
if (i != 0) try writer.writeAll(", ");
const field_name = ty.structFieldName(@intCast(i), zcu).unwrap().?;
try writer.print(".{i} = ", .{field_name.fmt(ip)});
try print(try val.fieldValue(pt, i), writer, level - 1, pt, have_sema, sema);
try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
try writer.writeAll(" }");
return;
@@ -268,7 +265,7 @@ fn printAggregate(
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
try print(try val.fieldValue(pt, i), writer, level - 1, pt, have_sema, sema);
try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
@@ -278,11 +275,12 @@ fn printAggregate(
fn printPtr(
ptr_val: Value,
/// Whether to print `derivation` as an lvalue or rvalue. If `null`, the more concise option is chosen.
want_kind: ?PrintPtrKind,
writer: anytype,
level: u8,
pt: Zcu.PerThread,
comptime have_sema: bool,
sema: if (have_sema) *Sema else void,
opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Zcu.CompileError)!void {
const ptr = switch (pt.zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
.undef => return writer.writeAll("undefined"),
@@ -300,8 +298,7 @@ fn printPtr(
writer,
level,
pt,
have_sema,
sema,
opt_sema,
),
else => {},
}
@@ -309,57 +306,96 @@ fn printPtr(
var arena = std.heap.ArenaAllocator.init(pt.zcu.gpa);
defer arena.deinit();
const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), pt, have_sema, sema);
try printPtrDerivation(derivation, writer, level, pt, have_sema, sema);
const derivation = if (opt_sema) |sema|
try ptr_val.pointerDerivationAdvanced(arena.allocator(), pt, true, sema)
else
try ptr_val.pointerDerivationAdvanced(arena.allocator(), pt, false, null);
_ = try printPtrDerivation(derivation, writer, pt, want_kind, .{ .print_val = .{
.level = level,
.opt_sema = opt_sema,
} }, 20);
}
/// Print `derivation` as an lvalue, i.e. such that writing `&` before this gives the pointer value.
fn printPtrDerivation(
const PrintPtrKind = enum { lvalue, rvalue };
/// Print the pointer defined by `derivation` as an lvalue or an rvalue.
/// Returns the root derivation, which may be ignored.
pub fn printPtrDerivation(
derivation: Value.PointerDeriveStep,
writer: anytype,
level: u8,
pt: Zcu.PerThread,
comptime have_sema: bool,
sema: if (have_sema) *Sema else void,
) (@TypeOf(writer).Error || Zcu.CompileError)!void {
/// Whether to print `derivation` as an lvalue or rvalue. If `null`, the more concise option is chosen.
/// If this is `.rvalue`, the result may look like `&foo`, so it's not necessarily valid to treat it as
/// an atom -- e.g. `&foo.*` is distinct from `(&foo).*`.
want_kind: ?PrintPtrKind,
/// How to print the "root" of the derivation. `.print_val` will recursively print other values if needed,
/// e.g. for UAV refs. `.str` will just write the root as the given string.
root_strat: union(enum) {
str: []const u8,
print_val: struct {
level: u8,
opt_sema: ?*Sema,
},
},
/// The maximum recursion depth. We can never recurse infinitely here, but the depth can be arbitrary,
/// so at this depth we just write "..." to prevent stack overflow.
ptr_depth: u8,
) !Value.PointerDeriveStep {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (derivation) {
.int => |int| try writer.print("@as({}, @ptrFromInt({x})).*", .{
int.ptr_ty.fmt(pt),
int.addr,
}),
.nav_ptr => |nav| {
try writer.print("{}", .{ip.getNav(nav).fqn.fmt(ip)});
},
.uav_ptr => |uav| {
const ty = Value.fromInterned(uav.val).typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(pt)});
try print(Value.fromInterned(uav.val), writer, level - 1, pt, have_sema, sema);
try writer.writeByte(')');
},
.comptime_alloc_ptr => |info| {
try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(pt)});
try print(info.val, writer, level - 1, pt, have_sema, sema);
try writer.writeByte(')');
},
.comptime_field_ptr => |val| {
const ty = val.typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(pt)});
try print(val, writer, level - 1, pt, have_sema, sema);
try writer.writeByte(')');
},
.eu_payload_ptr => |info| {
if (ptr_depth == 0) {
const root_step = root: switch (derivation) {
inline .eu_payload_ptr,
.opt_payload_ptr,
.field_ptr,
.elem_ptr,
.offset_and_cast,
=> |step| continue :root step.parent.*,
else => |step| break :root step,
};
try writer.writeAll("...");
return root_step;
}
const result_kind: PrintPtrKind = switch (derivation) {
.nav_ptr,
.uav_ptr,
.comptime_alloc_ptr,
.comptime_field_ptr,
.eu_payload_ptr,
.opt_payload_ptr,
.field_ptr,
.elem_ptr,
=> .lvalue,
.offset_and_cast,
.int,
=> .rvalue,
};
const need_kind = want_kind orelse result_kind;
if (need_kind == .rvalue and result_kind == .lvalue) {
try writer.writeByte('&');
}
// null if `derivation` is the root.
const root_or_null: ?Value.PointerDeriveStep = switch (derivation) {
.eu_payload_ptr => |info| root: {
try writer.writeByte('(');
try printPtrDerivation(info.parent.*, writer, level, pt, have_sema, sema);
const root = try printPtrDerivation(info.parent.*, writer, pt, .lvalue, root_strat, ptr_depth - 1);
try writer.writeAll(" catch unreachable)");
break :root root;
},
.opt_payload_ptr => |info| {
try printPtrDerivation(info.parent.*, writer, level, pt, have_sema, sema);
.opt_payload_ptr => |info| root: {
const root = try printPtrDerivation(info.parent.*, writer, pt, .lvalue, root_strat, ptr_depth - 1);
try writer.writeAll(".?");
break :root root;
},
.field_ptr => |field| {
try printPtrDerivation(field.parent.*, writer, level, pt, have_sema, sema);
.field_ptr => |field| root: {
const root = try printPtrDerivation(field.parent.*, writer, pt, null, root_strat, ptr_depth - 1);
const agg_ty = (try field.parent.ptrType(pt)).childType(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
.@"struct" => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| {
@@ -379,19 +415,58 @@ fn printPtrDerivation(
},
else => unreachable,
}
break :root root;
},
.elem_ptr => |elem| {
try printPtrDerivation(elem.parent.*, writer, level, pt, have_sema, sema);
.elem_ptr => |elem| root: {
const root = try printPtrDerivation(elem.parent.*, writer, pt, null, root_strat, ptr_depth - 1);
try writer.print("[{d}]", .{elem.elem_idx});
break :root root;
},
.offset_and_cast => |oac| if (oac.byte_offset == 0) {
.offset_and_cast => |oac| if (oac.byte_offset == 0) root: {
try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(pt)});
try printPtrDerivation(oac.parent.*, writer, level, pt, have_sema, sema);
const root = try printPtrDerivation(oac.parent.*, writer, pt, .rvalue, root_strat, ptr_depth - 1);
try writer.writeAll("))");
} else {
break :root root;
} else root: {
try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(pt)});
try printPtrDerivation(oac.parent.*, writer, level, pt, have_sema, sema);
const root = try printPtrDerivation(oac.parent.*, writer, pt, .rvalue, root_strat, ptr_depth - 1);
try writer.print(") + {d}))", .{oac.byte_offset});
break :root root;
},
.int, .nav_ptr, .uav_ptr, .comptime_alloc_ptr, .comptime_field_ptr => null,
};
if (root_or_null == null) switch (root_strat) {
.str => |x| try writer.writeAll(x),
.print_val => |x| switch (derivation) {
.int => |int| try writer.print("@as({}, @ptrFromInt(0x{x}))", .{ int.ptr_ty.fmt(pt), int.addr }),
.nav_ptr => |nav| try writer.print("{}", .{ip.getNav(nav).fqn.fmt(ip)}),
.uav_ptr => |uav| {
const ty = Value.fromInterned(uav.val).typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(pt)});
try print(Value.fromInterned(uav.val), writer, x.level - 1, pt, x.opt_sema);
try writer.writeByte(')');
},
.comptime_alloc_ptr => |info| {
try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(pt)});
try print(info.val, writer, x.level - 1, pt, x.opt_sema);
try writer.writeByte(')');
},
.comptime_field_ptr => |val| {
const ty = val.typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(pt)});
try print(val, writer, x.level - 1, pt, x.opt_sema);
try writer.writeByte(')');
},
else => unreachable,
},
};
if (need_kind == .lvalue and result_kind == .rvalue) {
try writer.writeAll(".*");
}
return root_or_null orelse derivation;
}

View File

@@ -1430,19 +1430,8 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
extra_index = try self.writeCaptures(stream, extra_index, captures_len);
try stream.writeAll(", ");
if (small.has_backing_int) {
const backing_int_body_len = self.code.extra[extra_index];
@@ -1645,19 +1634,8 @@ const Writer = struct {
});
try self.writeFlag(stream, "autoenum, ", small.auto_enum_tag);
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
extra_index = try self.writeCaptures(stream, extra_index, captures_len);
try stream.writeAll(", ");
if (decls_len == 0) {
try stream.writeAll("{}");
@@ -1805,19 +1783,8 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
try self.writeFlag(stream, "nonexhaustive, ", small.nonexhaustive);
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
extra_index = try self.writeCaptures(stream, extra_index, captures_len);
try stream.writeAll(", ");
if (decls_len == 0) {
try stream.writeAll("{}, ");
@@ -1910,19 +1877,8 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
extra_index = try self.writeCaptures(stream, extra_index, captures_len);
try stream.writeAll(", ");
if (decls_len == 0) {
try stream.writeAll("{}) ");
@@ -2720,6 +2676,26 @@ const Writer = struct {
return stream.print("%{d}", .{@intFromEnum(inst)});
}
/// Prints the capture list of a container declaration as `{ a = ..., b = ... }`
/// (or `{}` when empty) and returns the extra index advanced past both the
/// captures and their parallel name array.
///
/// The ZIR layout stores `captures_len` `Zir.Inst.Capture` words followed by
/// `captures_len` `Zir.NullTerminatedString` name words, hence the `2 *` in the
/// returned index. A name of `.empty` means the capture is unnamed and only the
/// capture itself is printed.
fn writeCaptures(self: *Writer, stream: anytype, extra_index: usize, captures_len: u32) !usize {
    if (captures_len == 0) {
        try stream.writeAll("{}");
        // No captures means no name array either; nothing consumed.
        return extra_index;
    }
    const captures: []const Zir.Inst.Capture = @ptrCast(self.code.extra[extra_index..][0..captures_len]);
    const capture_names: []const Zir.NullTerminatedString = @ptrCast(self.code.extra[extra_index + captures_len ..][0..captures_len]);
    // Open the list once; callers append their own trailing ", " after us.
    try stream.writeAll("{ ");
    for (captures, capture_names, 0..) |capture, name, i| {
        // Separate entries with ", " (bug fix: the list previously re-opened
        // "{ " per capture and never closed or separated entries).
        if (i != 0) try stream.writeAll(", ");
        if (name != .empty) {
            const name_slice = self.code.nullTerminatedString(name);
            try stream.print("{s} = ", .{name_slice});
        }
        try self.writeCapture(stream, capture);
    }
    try stream.writeAll(" }");
    return extra_index + 2 * captures_len;
}
fn writeCapture(self: *Writer, stream: anytype, capture: Zir.Inst.Capture) !void {
switch (capture.unwrap()) {
.nested => |i| return stream.print("[{d}]", .{i}),

View File

@@ -3,10 +3,8 @@ export fn entry() void {
}
// error
// backend=stage2
// target=native
//
// :2:5: error: found compile log statement
//
// Compile Log Output:
// @as(*const anyopaque, @as(*const anyopaque, @ptrCast(tmp.entry)))
// @as(*const anyopaque, @as(*const anyopaque, @ptrCast(&tmp.entry)))

View File

@@ -67,19 +67,28 @@ export fn far() void {
//
// :5:19: error: runtime value contains reference to comptime var
// :5:19: note: comptime var pointers are not available at runtime
// :4:27: note: 'runtime_value' points to comptime var declared here
// :12:40: error: runtime value contains reference to comptime var
// :12:40: note: comptime var pointers are not available at runtime
// :11:27: note: 'runtime_value' points to comptime var declared here
// :19:50: error: runtime value contains reference to comptime var
// :19:50: note: comptime var pointers are not available at runtime
// :18:27: note: 'runtime_value' points to comptime var declared here
// :28:9: error: runtime value contains reference to comptime var
// :28:9: note: comptime var pointers are not available at runtime
// :27:27: note: 'runtime_value' points to comptime var declared here
// :36:9: error: runtime value contains reference to comptime var
// :36:9: note: comptime var pointers are not available at runtime
// :35:27: note: 'runtime_value' points to comptime var declared here
// :41:12: error: runtime value contains reference to comptime var
// :41:12: note: comptime var pointers are not available at runtime
// :40:27: note: 'runtime_value' points to comptime var declared here
// :46:39: error: runtime value contains reference to comptime var
// :46:39: note: comptime var pointers are not available at runtime
// :45:27: note: 'runtime_value' points to comptime var declared here
// :55:18: error: runtime value contains reference to comptime var
// :55:18: note: comptime var pointers are not available at runtime
// :51:30: note: 'runtime_value' points to comptime var declared here
// :63:18: error: runtime value contains reference to comptime var
// :63:18: note: comptime var pointers are not available at runtime
// :59:27: note: 'runtime_value' points to comptime var declared here

View File

@@ -47,10 +47,19 @@ export var h: *[1]u32 = h: {
// error
//
// :1:27: error: global variable contains reference to comptime var
// :2:18: note: 'a' points to comptime var declared here
// :6:30: error: global variable contains reference to comptime var
// :7:18: note: 'b[0]' points to comptime var declared here
// :11:30: error: global variable contains reference to comptime var
// :12:18: note: 'c' points to comptime var declared here
// :16:33: error: global variable contains reference to comptime var
// :17:18: note: 'd' points to comptime var declared here
// :22:24: error: global variable contains reference to comptime var
// :23:18: note: 'e.ptr' points to comptime var declared here
// :28:33: error: global variable contains reference to comptime var
// :29:18: note: 'f' points to comptime var declared here
// :34:40: error: global variable contains reference to comptime var
// :34:40: note: 'g' points to 'v0[0]', where
// :36:24: note: 'v0[1]' points to comptime var declared here
// :42:28: error: global variable contains reference to comptime var
// :43:22: note: 'h' points to comptime var declared here

View File

@@ -0,0 +1,25 @@
const Wrapper = struct { ptr: *ComptimeThing };
const ComptimeThing = struct {
x: comptime_int,
fn NewType(comptime ct: *ComptimeThing) type {
const wrapper: Wrapper = .{ .ptr = ct };
return struct {
pub fn foo() void {
_ = wrapper.ct;
}
};
}
};
comptime {
var ct: ComptimeThing = .{ .x = 123 };
const Inner = ct.NewType();
Inner.foo();
}
// error
//
// :7:16: error: captured value contains reference to comptime var
// :16:30: note: 'wrapper.ptr' points to comptime var declared here
// :17:29: note: called from here

View File

@@ -31,5 +31,5 @@ pub fn main() !void {}
// :20:5: error: found compile log statement
//
// Compile Log Output:
// @as([]i32, @as([*]i32, @ptrCast(@as(tmp.UnionContainer, .{ .buf = .{ 1, 2 } }).buf[0]))[0..2])
// @as([]i32, @as([*]i32, @ptrCast(@as(tmp.StructContainer, .{ .buf = .{ 3, 4 } }).buf[0]))[0..2])
// @as([]i32, @as([*]i32, @ptrCast(&@as(tmp.UnionContainer, .{ .buf = .{ 1, 2 } }).buf))[0..2])
// @as([]i32, @as([*]i32, @ptrCast(&@as(tmp.StructContainer, .{ .buf = .{ 3, 4 } }).buf))[0..2])