Merge pull request #13219 from Vexu/stage2-fixes

Stage2 bug fixes
This commit is contained in:
Veikka Tuominen
2022-10-21 12:11:49 +02:00
committed by GitHub
46 changed files with 647 additions and 201 deletions

View File

@@ -232,7 +232,7 @@ pub const ResultLoc = union(enum) {
coerced_ty: Zir.Inst.Ref,
/// The expression must store its result into this typed pointer. The result instruction
/// from the expression must be ignored.
ptr: Zir.Inst.Ref,
ptr: PtrResultLoc,
/// The expression must store its result into this allocation, which has an inferred type.
/// The result instruction from the expression must be ignored.
/// Always an instruction with tag `alloc_inferred`.
@@ -242,6 +242,11 @@ pub const ResultLoc = union(enum) {
/// The result instruction from the expression must be ignored.
block_ptr: *GenZir,
const PtrResultLoc = struct {
inst: Zir.Inst.Ref,
src_node: ?Ast.Node.Index = null,
};
pub const Strategy = struct {
elide_store_to_block_ptr_instructions: bool,
tag: Tag,
@@ -1380,8 +1385,8 @@ fn arrayInitExpr(
const result = try arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag);
return rvalue(gz, rl, result, node);
},
.ptr => |ptr_inst| {
return arrayInitExprRlPtr(gz, scope, rl, node, ptr_inst, array_init.ast.elements, types.array);
.ptr => |ptr_res| {
return arrayInitExprRlPtr(gz, scope, rl, node, ptr_res.inst, array_init.ast.elements, types.array);
},
.inferred_ptr => |ptr_inst| {
if (types.array == .none) {
@@ -1513,7 +1518,7 @@ fn arrayInitExprRlPtrInner(
});
astgen.extra.items[extra_index] = refToIndex(elem_ptr).?;
extra_index += 1;
_ = try expr(gz, scope, .{ .ptr = elem_ptr }, elem_init);
_ = try expr(gz, scope, .{ .ptr = .{ .inst = elem_ptr } }, elem_init);
}
const tag: Zir.Inst.Tag = if (gz.force_comptime)
@@ -1631,7 +1636,7 @@ fn structInitExpr(
const result = try structInitExprRlTy(gz, scope, node, struct_init, inner_ty_inst, .struct_init);
return rvalue(gz, rl, result, node);
},
.ptr => |ptr_inst| return structInitExprRlPtr(gz, scope, rl, node, struct_init, ptr_inst),
.ptr => |ptr_res| return structInitExprRlPtr(gz, scope, rl, node, struct_init, ptr_res.inst),
.inferred_ptr => |ptr_inst| {
if (struct_init.ast.type_expr == 0) {
// We treat this case differently so that we don't get a crash when
@@ -1739,7 +1744,7 @@ fn structInitExprRlPtrInner(
});
astgen.extra.items[extra_index] = refToIndex(field_ptr).?;
extra_index += 1;
_ = try expr(gz, scope, .{ .ptr = field_ptr }, field_init);
_ = try expr(gz, scope, .{ .ptr = .{ .inst = field_ptr } }, field_init);
}
const tag: Zir.Inst.Tag = if (gz.force_comptime)
@@ -2998,7 +3003,7 @@ fn varDecl(
}
};
gz.rl_ty_inst = type_inst;
break :a .{ .alloc = alloc, .result_loc = .{ .ptr = alloc } };
break :a .{ .alloc = alloc, .result_loc = .{ .ptr = .{ .inst = alloc } } };
} else a: {
const alloc = alloc: {
if (align_inst == .none) {
@@ -3098,7 +3103,10 @@ fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!voi
}
}
const lvalue = try lvalExpr(gz, scope, lhs);
_ = try expr(gz, scope, .{ .ptr = lvalue }, rhs);
_ = try expr(gz, scope, .{ .ptr = .{
.inst = lvalue,
.src_node = infix_node,
} }, rhs);
}
fn assignOp(
@@ -6729,7 +6737,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
}
const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node, true)) .{
.ptr = try gz.addNode(.ret_ptr, node),
.ptr = .{ .inst = try gz.addNode(.ret_ptr, node) },
} else .{
.ty = try gz.addNode(.ret_type, node),
};
@@ -6748,7 +6756,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
},
.always => {
// Value is always an error. Emit both error defers and regular defers.
const err_code = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
const err_code = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr.inst, node) else operand;
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
try emitDbgStmt(gz, ret_line, ret_column);
try gz.addRet(rl, operand, node);
@@ -6765,7 +6773,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
}
// Emit conditional branch for generating errdefers.
const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr.inst, node) else operand;
const is_non_err = try gz.addUnNode(.is_non_err, result, node);
const condbr = try gz.addCondBr(.condbr, node);
@@ -7336,7 +7344,10 @@ fn as(
const result = try reachableExpr(gz, scope, .{ .ty = dest_type }, rhs, node);
return rvalue(gz, rl, result, node);
},
.ptr, .inferred_ptr => |result_ptr| {
.ptr => |result_ptr| {
return asRlPtr(gz, scope, rl, node, result_ptr.inst, rhs, dest_type);
},
.inferred_ptr => |result_ptr| {
return asRlPtr(gz, scope, rl, node, result_ptr, rhs, dest_type);
},
.block_ptr => |block_scope| {
@@ -9569,9 +9580,9 @@ fn rvalue(
}),
}
},
.ptr => |ptr_inst| {
_ = try gz.addPlNode(.store_node, src_node, Zir.Inst.Bin{
.lhs = ptr_inst,
.ptr => |ptr_res| {
_ = try gz.addPlNode(.store_node, ptr_res.src_node orelse src_node, Zir.Inst.Bin{
.lhs = ptr_res.inst,
.rhs = result,
});
return result;
@@ -10444,11 +10455,16 @@ const GenZir = struct {
gz.break_result_loc = parent_rl;
},
.discard, .none, .ptr, .ref => {
.discard, .none, .ref => {
gz.rl_ty_inst = .none;
gz.break_result_loc = parent_rl;
},
.ptr => |ptr_res| {
gz.rl_ty_inst = .none;
gz.break_result_loc = .{ .ptr = .{ .inst = ptr_res.inst } };
},
.inferred_ptr => |ptr| {
gz.rl_ty_inst = .none;
gz.rl_ptr = ptr;
@@ -11610,7 +11626,7 @@ const GenZir = struct {
fn addRet(gz: *GenZir, rl: ResultLoc, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void {
switch (rl) {
.ptr => |ret_ptr| _ = try gz.addUnNode(.ret_load, ret_ptr, node),
.ptr => |ptr_res| _ = try gz.addUnNode(.ret_load, ptr_res.inst, node),
.ty, .ty_shift_operand => _ = try gz.addUnNode(.ret_node, operand, node),
else => unreachable,
}

View File

@@ -2878,6 +2878,32 @@ pub const SrcLoc = struct {
};
return nodeToSpan(tree, full.ast.type_expr);
},
.node_offset_store_ptr => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const node = src_loc.declRelativeToNodeIndex(node_off);
switch (node_tags[node]) {
.assign => {
return nodeToSpan(tree, node_datas[node].lhs);
},
else => return nodeToSpan(tree, node),
}
},
.node_offset_store_operand => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const node = src_loc.declRelativeToNodeIndex(node_off);
switch (node_tags[node]) {
.assign => {
return nodeToSpan(tree, node_datas[node].rhs);
},
else => return nodeToSpan(tree, node),
}
},
}
}
@@ -3213,6 +3239,12 @@ pub const LazySrcLoc = union(enum) {
/// The source location points to the type of an array or struct initializer.
/// The Decl is determined contextually.
node_offset_init_ty: i32,
/// The source location points to the LHS of an assignment.
/// The Decl is determined contextually.
node_offset_store_ptr: i32,
/// The source location points to the RHS of an assignment.
/// The Decl is determined contextually.
node_offset_store_operand: i32,
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
@@ -3296,6 +3328,8 @@ pub const LazySrcLoc = union(enum) {
.node_offset_container_tag,
.node_offset_field_default,
.node_offset_init_ty,
.node_offset_store_ptr,
.node_offset_store_operand,
=> .{
.file_scope = decl.getFileScope(),
.parent_decl_node = decl.src_node,
@@ -5607,6 +5641,18 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
else => |e| return e,
};
{
var it = sema.unresolved_inferred_allocs.keyIterator();
while (it.next()) |ptr_inst| {
// The lack of a resolve_inferred_alloc means that this instruction
// is unused so it just has to be a no-op.
sema.air_instructions.set(ptr_inst.*, .{
.tag = .alloc,
.data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) },
});
}
}
// If we don't get an error return trace from a caller, create our own.
if (func.calls_or_awaits_errorable_fn and
mod.comp.bin_file.options.error_return_tracing and

View File

@@ -82,6 +82,8 @@ is_generic_instantiation: bool = false,
/// function types will emit generic poison instead of a partial type.
no_partial_func_ty: bool = false,
unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
const std = @import("std");
const math = std.math;
const mem = std.mem;
@@ -579,6 +581,7 @@ pub fn deinit(sema: *Sema) void {
}
sema.post_hoc_blocks.deinit(gpa);
}
sema.unresolved_inferred_allocs.deinit(gpa);
sema.* = undefined;
}
@@ -1235,9 +1238,6 @@ fn analyzeBodyInner(
i = 0;
continue;
} else {
const src_node = sema.code.instructions.items(.data)[inst].node;
const src = LazySrcLoc.nodeOffset(src_node);
try sema.requireFunctionBlock(block, src);
break always_noreturn;
}
},
@@ -2186,7 +2186,6 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
defer trash_block.instructions.deinit(sema.gpa);
const operand = try trash_block.addBitCast(pointee_ty, .void_value);
try sema.requireFunctionBlock(block, src);
const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = pointee_ty,
.@"align" = inferred_alloc.alignment,
@@ -3219,7 +3218,6 @@ fn zirAllocExtended(
try sema.validateVarType(block, ty_src, var_ty, false);
}
const target = sema.mod.getTarget();
try sema.requireFunctionBlock(block, src);
try sema.resolveTypeLayout(block, src, var_ty);
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = var_ty,
@@ -3237,8 +3235,8 @@ fn zirAllocExtended(
inferred_alloc_ty,
try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }),
);
try sema.requireFunctionBlock(block, src);
try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {});
return result;
}
@@ -3318,7 +3316,6 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
return sema.addConstant(const_ptr_ty, val);
}
try sema.requireFunctionBlock(block, src);
return block.addBitCast(const_ptr_ty, alloc);
}
@@ -3345,7 +3342,6 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_decl_src = inst_data.src();
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src);
@@ -3355,7 +3351,6 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.pointee_type = var_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
try sema.requireFunctionBlock(block, var_decl_src);
try sema.queueFullTypeResolution(var_ty);
return block.addTy(.alloc, ptr_type);
}
@@ -3365,7 +3360,6 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const var_decl_src = inst_data.src();
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
@@ -3377,7 +3371,6 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.pointee_type = var_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
try sema.requireFunctionBlock(block, var_decl_src);
try sema.queueFullTypeResolution(var_ty);
return block.addTy(.alloc, ptr_type);
}
@@ -3413,8 +3406,8 @@ fn zirAllocInferred(
inferred_alloc_ty,
try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }),
);
try sema.requireFunctionBlock(block, src);
try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {});
return result;
}
@@ -3464,6 +3457,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
}
},
.inferred_alloc => {
assert(sema.unresolved_inferred_allocs.remove(ptr_inst));
const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
const peer_inst_list = inferred_alloc.data.prongs.items(.stored_inst);
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);
@@ -3566,7 +3560,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
return;
}
try sema.requireFunctionBlock(block, src);
try sema.queueFullTypeResolution(final_elem_ty);
// Change it to a normal alloc.
@@ -3912,7 +3905,6 @@ fn validateUnionInit(
return;
}
try sema.requireFunctionBlock(block, init_src);
const new_tag = try sema.addConstant(tag_ty, tag_val);
_ = try block.addBinOp(.set_union_tag, union_ptr, new_tag);
}
@@ -4648,7 +4640,10 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
try sema.addToInferredErrorSet(operand);
}
return sema.storePtr2(block, src, ptr, src, operand, src, if (is_ret) .ret_ptr else .store);
const ptr_src: LazySrcLoc = .{ .node_offset_store_ptr = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_store_operand = inst_data.src_node };
const air_tag: Air.Inst.Tag = if (is_ret) .ret_ptr else .store;
return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}
fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4808,7 +4803,6 @@ fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index, force_comptime: bo
if (block.is_comptime or force_comptime) {
return sema.fail(block, src, "encountered @panic at comptime", .{});
}
try sema.requireFunctionBlock(block, src);
return sema.panicWithMsg(block, src, msg_inst);
}
@@ -6288,7 +6282,6 @@ fn analyzeCall(
break :res res2;
} else res: {
assert(!func_ty_info.is_generic);
try sema.requireFunctionBlock(block, call_src);
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
for (uncasted_args) |uncasted_arg, i| {
@@ -6414,9 +6407,9 @@ fn analyzeInlineCallArg(
};
}
const casted_arg = try sema.coerce(arg_block, param_ty, uncasted_arg, arg_src);
try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg);
if (is_comptime_call) {
try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg);
const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
if (err == error.AnalysisFail and sema.err != null) {
try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
@@ -6441,6 +6434,20 @@ fn analyzeInlineCallArg(
.ty = param_ty,
.val = arg_val,
};
} else if (((try sema.resolveMaybeUndefVal(arg_block, arg_src, casted_arg)) == null) or
try sema.typeRequiresComptime(param_ty) or zir_tags[inst] == .param_comptime)
{
try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg);
} else {
// We have a comptime value but we need a runtime value to preserve inlining semantics,
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = param_ty,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
});
const alloc = try arg_block.addTy(.alloc, ptr_type);
_ = try arg_block.addBinOp(.store, alloc, casted_arg);
const loaded = try arg_block.addTyOp(.load, param_ty, alloc);
try sema.inst_map.putNoClobber(sema.gpa, inst, loaded);
}
arg_i.* += 1;
@@ -6449,9 +6456,10 @@ fn analyzeInlineCallArg(
// No coercion needed.
const uncasted_arg = uncasted_args[arg_i.*];
new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg);
try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg);
const param_ty = sema.typeOf(uncasted_arg);
if (is_comptime_call) {
try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg);
const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
if (err == error.AnalysisFail and sema.err != null) {
try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
@@ -6476,6 +6484,20 @@ fn analyzeInlineCallArg(
.ty = sema.typeOf(uncasted_arg),
.val = arg_val,
};
} else if ((try sema.resolveMaybeUndefVal(arg_block, arg_src, uncasted_arg)) == null or
try sema.typeRequiresComptime(param_ty) or zir_tags[inst] == .param_anytype_comptime)
{
try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg);
} else {
// We have a comptime value but we need a runtime value to preserve inlining semantics,
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = param_ty,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
});
const alloc = try arg_block.addTy(.alloc, ptr_type);
_ = try arg_block.addBinOp(.store, alloc, uncasted_arg);
const loaded = try arg_block.addTyOp(.load, param_ty, alloc);
try sema.inst_map.putNoClobber(sema.gpa, inst, loaded);
}
arg_i.* += 1;
@@ -6588,6 +6610,11 @@ fn instantiateGenericCall(
}
const arg_ty = sema.typeOf(uncasted_args[i]);
if (is_comptime or is_anytype) {
// Tuple default values are a part of the type and need to be
// resolved to hash the type.
try sema.resolveTupleLazyValues(block, call_src, arg_ty);
}
if (is_comptime) {
const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) {
@@ -6892,8 +6919,6 @@ fn instantiateGenericCall(
const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl);
// Make a runtime call to the new function, making sure to omit the comptime args.
try sema.requireFunctionBlock(block, call_src);
const comptime_args = callee.comptime_args.?;
const func_ty = mod.declPtr(callee.owner_decl).ty;
const new_fn_info = func_ty.fnInfo();
@@ -6963,6 +6988,16 @@ fn instantiateGenericCall(
return result;
}
/// Recursively resolves lazy values among a tuple's comptime field values
/// (descending into nested tuple field types first). Called before hashing a
/// generic call's argument types, because tuple default values are part of
/// the type and must be fully resolved for the hash to be stable.
/// Fields tagged `unreachable_value` carry no resolvable value and are skipped.
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
if (!ty.isTuple()) return;
const tuple = ty.tupleFields();
for (tuple.values) |field_val, i| {
try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
if (field_val.tag() == .unreachable_value) continue;
try sema.resolveLazyValue(block, src, field_val);
}
}
fn emitDbgInline(
sema: *Sema,
block: *Block,
@@ -7441,7 +7476,6 @@ fn analyzeOptionalPayloadPtr(
// If the pointer resulting from this function was stored at comptime,
// the optional non-null bit would be set that way. But in this case,
// we need to emit a runtime instruction to do it.
try sema.requireFunctionBlock(block, src);
_ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
}
return sema.addConstant(
@@ -14499,6 +14533,12 @@ fn zirClosureGet(
return sema.failWithOwnedErrorMsg(msg);
}
if (tv.val.tag() == .unreachable_value) {
assert(block.is_typeof);
// We need a dummy runtime instruction with the correct type.
return block.addTy(.alloc, tv.ty);
}
return sema.addConstant(tv.ty, tv.val);
}
@@ -15664,7 +15704,9 @@ fn zirBoolBr(
_ = try lhs_block.addBr(block_inst, lhs_result);
const rhs_result = try sema.resolveBody(rhs_block, body, inst);
_ = try rhs_block.addBr(block_inst, rhs_result);
if (!sema.typeOf(rhs_result).isNoReturn()) {
_ = try rhs_block.addBr(block_inst, rhs_result);
}
return finishCondBr(sema, parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
}
@@ -15996,7 +16038,6 @@ fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (block.is_comptime or inst_data.force_comptime) {
return sema.fail(block, src, "reached unreachable code", .{});
}
try sema.requireFunctionBlock(block, src);
// TODO Add compile error for @optimizeFor occurring too late in a scope.
try block.addUnreachable(src, true);
return always_noreturn;
@@ -16171,6 +16212,7 @@ fn analyzeRet(
if (block.inlining) |inlining| {
if (block.is_comptime) {
_ = try sema.resolveConstMaybeUndefVal(block, src, operand, "value being returned at comptime must be comptime-known");
inlining.comptime_result = operand;
return error.ComptimeReturn;
}
@@ -21117,14 +21159,6 @@ fn zirBuiltinExtern(
return block.addBitCast(ty, ref);
}
/// Asserts that the block is not comptime.
fn requireFunctionBlock(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
assert(!block.is_comptime);
if (sema.func == null and !block.is_typeof and !block.is_coerce_result_ptr) {
return sema.fail(block, src, "instruction illegal outside function body", .{});
}
}
fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src: ?LazySrcLoc) !void {
if (block.is_comptime) {
const msg = msg: {
@@ -21138,7 +21172,6 @@ fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src:
};
return sema.failWithOwnedErrorMsg(msg);
}
try sema.requireFunctionBlock(block, src);
}
/// Emit a compile error if type cannot be used for a runtime variable.
@@ -25111,11 +25144,6 @@ fn storePtr2(
return;
}
if (block.is_comptime) {
// TODO ideally this would tell why the block is comptime
return sema.fail(block, ptr_src, "cannot store to runtime value in comptime block", .{});
}
try sema.requireRuntimeBlock(block, src, runtime_src);
try sema.queueFullTypeResolution(elem_ty);
if (is_ret) {
@@ -27023,12 +27051,6 @@ fn analyzeLoad(
}
}
if (block.is_comptime) {
// TODO ideally this would tell why the block is comptime
return sema.fail(block, ptr_src, "cannot load runtime value in comptime block", .{});
}
try sema.requireFunctionBlock(block, src);
return block.addTyOp(.load, elem_ty, ptr);
}
@@ -28581,6 +28603,20 @@ fn resolveLazyValue(
const ty = val.castTag(.lazy_size).?.data;
return sema.resolveTypeLayout(block, src, ty);
},
.comptime_field_ptr => {
const field_ptr = val.castTag(.comptime_field_ptr).?.data;
return sema.resolveLazyValue(block, src, field_ptr.field_val);
},
.@"union" => {
const union_val = val.castTag(.@"union").?.data;
return sema.resolveLazyValue(block, src, union_val.val);
},
.aggregate => {
const aggregate = val.castTag(.aggregate).?.data;
for (aggregate) |elem_val| {
try sema.resolveLazyValue(block, src, elem_val);
}
},
else => return,
}
}
@@ -28751,6 +28787,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator);
try wip_captures.finalize();
} else {
var buf: Type.Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -29362,6 +29399,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
}
}
}
try wip_captures.finalize();
struct_obj.have_field_inits = true;
}

View File

@@ -5,29 +5,21 @@ const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
pub const Class = enum { memory, integer, none, float_array };
pub const Class = enum(u8) { memory, integer, none, float_array, _ };
/// For `float_array` the second element will be the amount of floats.
pub fn classifyType(ty: Type, target: std.Target) [2]Class {
var maybe_float_bits: ?u16 = null;
const float_count = countFloats(ty, target, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array, @intToEnum(Class, float_count) };
return classifyTypeInner(ty, target);
}
fn classifyTypeInner(ty: Type, target: std.Target) [2]Class {
if (!ty.hasRuntimeBitsIgnoreComptime()) return .{ .none, .none };
switch (ty.zigTypeTag()) {
.Struct => {
if (ty.containerLayout() == .Packed) return .{ .integer, .none };
if (ty.structFieldCount() <= 4) {
const fields = ty.structFields();
var float_size: ?u64 = null;
for (fields.values()) |field| {
if (field.ty.zigTypeTag() != .Float) break;
const field_size = field.ty.bitSize(target);
const prev_size = float_size orelse {
float_size = field_size;
continue;
};
if (field_size != prev_size) break;
} else {
return .{ .float_array, .none };
}
}
const bit_size = ty.bitSize(target);
if (bit_size > 128) return .{ .memory, .none };
if (bit_size > 64) return .{ .integer, .integer };
@@ -67,6 +59,70 @@ pub fn classifyType(ty: Type, target: std.Target) [2]Class {
}
}
// Maximum number of floats that may be passed as a `float_array` class.
// NOTE(review): 4 matches the AAPCS64 homogeneous-aggregate limit — confirm.
const sret_float_count = 4;
/// Counts how many scalar floats `ty` flattens to, for the `float_array`
/// classification used by `classifyType` above.
/// `maybe_float_bits` latches the bit width of the first float encountered;
/// any float of a different width makes the type ineligible.
/// Returns `maxInt(u32)` ("invalid") when `ty` cannot be a float array:
/// mixed float widths, a non-float/non-aggregate field, or more than
/// `sret_float_count` floats in total.
fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
const invalid = std.math.maxInt(u32);
switch (ty.zigTypeTag()) {
.Union => {
const fields = ty.unionFields();
var max_count: u32 = 0;
for (fields.values()) |field| {
const field_count = countFloats(field.ty, target, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > sret_float_count) return invalid;
}
// A union contributes the float count of its largest member.
return max_count;
},
.Struct => {
const fields_len = ty.structFieldCount();
var count: u32 = 0;
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
const field_count = countFloats(field_ty, target, maybe_float_bits);
if (field_count == invalid) return invalid;
// Struct fields accumulate; bail out as soon as the limit is exceeded.
count += field_count;
if (count > sret_float_count) return invalid;
}
return count;
},
.Float => {
const float_bits = maybe_float_bits.* orelse {
// First float seen fixes the required width for all later floats.
maybe_float_bits.* = ty.floatBits(target);
return 1;
};
if (ty.floatBits(target) == float_bits) return 1;
return invalid;
},
.Void => return 0,
else => return invalid,
}
}
/// Recursively finds the first `Float` type contained in `ty`, descending
/// through struct and union fields in declaration order. Returns null when
/// `ty` contains no float. Used by the LLVM backend to obtain the element
/// type of a `float_array` ABI parameter.
pub fn getFloatArrayType(ty: Type) ?Type {
switch (ty.zigTypeTag()) {
.Union => {
const fields = ty.unionFields();
for (fields.values()) |field| {
if (getFloatArrayType(field.ty)) |some| return some;
}
return null;
},
.Struct => {
const fields_len = ty.structFieldCount();
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
if (getFloatArrayType(field_ty)) |some| return some;
}
return null;
},
.Float => return ty,
else => return null,
}
}
const callee_preserved_regs_impl = if (builtin.os.tag.isDarwin()) struct {
pub const callee_preserved_regs = [_]Register{
.x20, .x21, .x22, .x23,

View File

@@ -388,6 +388,19 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
}
return result;
},
.Array => {
const ty_size = ty.abiSize(target);
if (ty_size <= 64) {
result[0] = .integer;
return result;
}
if (ty_size <= 128) {
result[0] = .integer;
result[1] = .integer;
return result;
}
return memory_class;
},
else => unreachable,
}
}

View File

@@ -3125,10 +3125,10 @@ pub const DeclGen = struct {
.as_u16 => {
try llvm_params.append(dg.context.intType(16));
},
.float_array => {
.float_array => |count| {
const param_ty = fn_info.param_types[it.zig_index - 1];
const float_ty = try dg.lowerType(param_ty.structFieldType(0));
const field_count = @intCast(c_uint, param_ty.structFieldCount());
const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty).?);
const field_count = @intCast(c_uint, count);
const arr_ty = float_ty.arrayType(field_count);
try llvm_params.append(arr_ty);
},
@@ -4801,7 +4801,7 @@ pub const FuncGen = struct {
const casted = self.builder.buildBitCast(llvm_arg, self.dg.context.intType(16), "");
try llvm_args.append(casted);
},
.float_array => {
.float_array => |count| {
const arg = args[it.zig_index - 1];
const arg_ty = self.air.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
@@ -4812,9 +4812,8 @@ pub const FuncGen = struct {
llvm_arg = store_inst;
}
const float_ty = try self.dg.lowerType(arg_ty.structFieldType(0));
const field_count = @intCast(u32, arg_ty.structFieldCount());
const array_llvm_ty = float_ty.arrayType(field_count);
const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?);
const array_llvm_ty = float_ty.arrayType(count);
const casted = self.builder.buildBitCast(llvm_arg, array_llvm_ty.pointerType(0), "");
const alignment = arg_ty.abiAlignment(target);
@@ -10214,7 +10213,7 @@ const ParamTypeIterator = struct {
llvm_types_buffer: [8]u16,
byval_attr: bool,
const Lowering = enum {
const Lowering = union(enum) {
no_bits,
byval,
byref,
@@ -10223,7 +10222,7 @@ const ParamTypeIterator = struct {
multiple_llvm_float,
slice,
as_u16,
float_array,
float_array: u8,
};
pub fn next(it: *ParamTypeIterator) ?Lowering {
@@ -10400,7 +10399,7 @@ const ParamTypeIterator = struct {
return .byref;
}
if (classes[0] == .float_array) {
return .float_array;
return Lowering{ .float_array = @enumToInt(classes[1]) };
}
if (classes[1] == .none) {
it.llvm_types_len = 1;

View File

@@ -3619,6 +3619,9 @@ pub const Type = extern union {
.@"struct" => {
if (sema_kit) |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
if (ty.containerLayout() != .Packed) {
return (try ty.abiSizeAdvanced(target, if (sema_kit) |sk| .{ .sema_kit = sk } else .eager)).scalar * 8;
}
var total: u64 = 0;
for (ty.structFields().values()) |field| {
total += try bitSizeAdvanced(field.ty, target, sema_kit);
@@ -3628,6 +3631,9 @@ pub const Type = extern union {
.tuple, .anon_struct => {
if (sema_kit) |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
if (ty.containerLayout() != .Packed) {
return (try ty.abiSizeAdvanced(target, if (sema_kit) |sk| .{ .sema_kit = sk } else .eager)).scalar * 8;
}
var total: u64 = 0;
for (ty.tupleFields().types) |field_ty| {
total += try bitSizeAdvanced(field_ty, target, sema_kit);
@@ -3643,6 +3649,9 @@ pub const Type = extern union {
.@"union", .union_safety_tagged, .union_tagged => {
if (sema_kit) |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
if (ty.containerLayout() != .Packed) {
return (try ty.abiSizeAdvanced(target, if (sema_kit) |sk| .{ .sema_kit = sk } else .eager)).scalar * 8;
}
const union_obj = ty.cast(Payload.Union).?.data;
assert(union_obj.haveFieldTypes());

View File

@@ -2632,6 +2632,9 @@ pub const Value = extern union {
.lazy_size,
=> return hashInt(ptr_val, hasher, target),
// The value is runtime-known and shouldn't affect the hash.
.runtime_int => {},
else => unreachable,
}
}

View File

@@ -41,6 +41,7 @@ test {
_ = @import("behavior/bugs/2006.zig");
_ = @import("behavior/bugs/2114.zig");
_ = @import("behavior/bugs/2346.zig");
_ = @import("behavior/bugs/2557.zig");
_ = @import("behavior/bugs/2578.zig");
_ = @import("behavior/bugs/2692.zig");
_ = @import("behavior/bugs/2889.zig");
@@ -87,6 +88,7 @@ test {
_ = @import("behavior/bugs/12033.zig");
_ = @import("behavior/bugs/12430.zig");
_ = @import("behavior/bugs/12486.zig");
_ = @import("behavior/bugs/12488.zig");
_ = @import("behavior/bugs/12551.zig");
_ = @import("behavior/bugs/12644.zig");
_ = @import("behavior/bugs/12680.zig");
@@ -103,7 +105,10 @@ test {
_ = @import("behavior/bugs/12972.zig");
_ = @import("behavior/bugs/12984.zig");
_ = @import("behavior/bugs/13068.zig");
_ = @import("behavior/bugs/13112.zig");
_ = @import("behavior/bugs/13128.zig");
_ = @import("behavior/bugs/13164.zig");
_ = @import("behavior/bugs/13171.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig");

View File

@@ -0,0 +1,13 @@
const expect = @import("std").testing.expect;
const A = struct {
a: u32,
};
// Passes a comptime tuple containing an array whose element is a lazy value
// (@sizeOf(A) on a not-yet-resolved type).
// NOTE(review): appears to exercise the resolveTupleLazyValues fix added in
// this commit (tuple values resolved before hashing the generic call) — confirm.
fn foo(comptime a: anytype) !void {
try expect(a[0][0] == @sizeOf(A));
}
test {
try foo(.{[_]usize{@sizeOf(A)}});
}

View File

@@ -0,0 +1,7 @@
// `b == 2` short-circuits `or` before the noreturn `@panic` operand is reached.
// NOTE(review): likely a regression test for the zirBoolBr change in this
// commit (no `br` emitted when the RHS body is noreturn) — confirm.
fn nice(a: u32, b: u32) bool {
return a == 5 or b == 2 or @panic("oh no");
}
test {
_ = nice(2, 2);
}

View File

@@ -0,0 +1,16 @@
const std = @import("std");
const builtin = @import("builtin");
// Inline call mixing a runtime argument (`x`) with a comptime-known one (null).
// NOTE(review): presumably exercises the analyzeInlineCallArg changes in this
// commit (preserving runtime semantics of arguments when inlining) — confirm.
inline fn setLimits(min: ?u32, max: ?u32) !void {
if (min != null and max != null) {
try std.testing.expect(min.? <= max.?);
}
}
test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
var x: u32 = 42;
try setLimits(x, null);
}

View File

@@ -0,0 +1,16 @@
const std = @import("std");
const expect = std.testing.expect;
// Generic type whose field is an untagged union, instantiated and read at
// comptime. NOTE(review): possibly exercises the `.@"union"` case added to
// resolveLazyValue in this commit — confirm which fix this test pins.
fn BuildType(comptime T: type) type {
return struct {
val: union {
b: T,
},
};
}
test {
const TestStruct = BuildType(u32);
const c = TestStruct{ .val = .{ .b = 10 } };
try expect(c.val.b == 10);
}

View File

@@ -0,0 +1,6 @@
test {
    // Regression test: an `if` expression whose comptime-true branch is a
    // block ending in `return` (type `noreturn`) must still resolve against
    // the `else` operand, giving `a` type `bool` — this only needs to compile.
    var a = if (true) {
        return;
    } else true;
    _ = a;
}

View File

@@ -1414,3 +1414,14 @@ test "continue nested inline for loop" {
}
try expect(a == 2);
}
test "length of global array is determinable at comptime" {
    const S = struct {
        var bytes: [1024]u8 = undefined;
        fn foo() !void {
            // `.len` comes from the array's type, so it is available at
            // comptime even though the array's contents are undefined.
            try std.testing.expect(bytes.len == 1024);
        }
    };
    comptime try S.foo();
}

View File

@@ -218,7 +218,7 @@ test "@bitSizeOf" {
try expect(@bitSizeOf(u8) == @sizeOf(u8) * 8);
try expect(@bitSizeOf(struct {
a: u2,
}) == 2);
}) == 8);
try expect(@bitSizeOf(packed struct {
a: u2,
}) == 2);
@@ -314,3 +314,27 @@ test "lazy size cast to float" {
test "bitSizeOf comptime_int" {
try expect(@bitSizeOf(comptime_int) == 0);
}
test "runtime instructions inside typeof in comptime only scope" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    {
        // `i` is initialized from a runtime value, so the @TypeOf operands
        // below contain runtime instructions that must be tolerated inside
        // the comptime-only struct declaration scope.
        var y: i8 = 2;
        const i: [2]i8 = [_]i8{ 1, y };
        const T = struct {
            a: @TypeOf(i) = undefined, // causes crash
            b: @TypeOf(i[0]) = undefined, // causes crash
        };
        try expect(@TypeOf((T{}).a) == [2]i8);
        try expect(@TypeOf((T{}).b) == i8);
    }
    {
        // Same check with an anonymous tuple whose second element is runtime-known.
        var y: i8 = 2;
        const i = .{ 1, y };
        const T = struct {
            b: @TypeOf(i[1]) = undefined,
        };
        try expect(@TypeOf((T{}).b) == i8);
    }
}

View File

@@ -20,3 +20,14 @@ test "@src" {
try doTheTest();
}
test "@src used as a comptime parameter" {
    const S = struct {
        fn Foo(comptime _: std.builtin.SourceLocation) type {
            return struct {};
        }
    };
    // Each @src() call site yields a distinct SourceLocation value, so the
    // two generic instantiations must produce two distinct types.
    const T1 = S.Foo(@src());
    const T2 = S.Foo(@src());
    try expect(T1 != T2);
}

View File

@@ -12,6 +12,27 @@ static void assert_or_panic(bool ok) {
}
}
#ifdef __i386__
# define ZIG_NO_I128
#endif
#ifdef __arm__
# define ZIG_NO_I128
#endif
#ifdef __mips__
# define ZIG_NO_I128
#endif
#ifdef __i386__
# define ZIG_NO_COMPLEX
#endif
#ifdef __mips__
# define ZIG_NO_COMPLEX
#endif
#ifndef ZIG_NO_I128
struct i128 {
__int128 value;
};
@@ -19,17 +40,22 @@ struct i128 {
struct u128 {
unsigned __int128 value;
};
#endif
void zig_u8(uint8_t);
void zig_u16(uint16_t);
void zig_u32(uint32_t);
void zig_u64(uint64_t);
#ifndef ZIG_NO_I128
void zig_struct_u128(struct u128);
#endif
void zig_i8(int8_t);
void zig_i16(int16_t);
void zig_i32(int32_t);
void zig_i64(int64_t);
#ifndef ZIG_NO_I128
void zig_struct_i128(struct i128);
#endif
void zig_five_integers(int32_t, int32_t, int32_t, int32_t, int32_t);
void zig_f32(float);
@@ -95,7 +121,9 @@ void zig_med_struct_mixed(struct MedStructMixed);
struct MedStructMixed zig_ret_med_struct_mixed();
void zig_small_packed_struct(uint8_t);
#ifndef ZIG_NO_I128
void zig_big_packed_struct(__int128);
#endif
struct SplitStructInts {
uint64_t a;
@@ -151,19 +179,26 @@ void run_c_tests(void) {
zig_u16(0xfffe);
zig_u32(0xfffffffd);
zig_u64(0xfffffffffffffffc);
#ifndef ZIG_NO_I128
{
struct u128 s = {0xfffffffffffffffc};
zig_struct_u128(s);
}
#endif
zig_i8(-1);
zig_i16(-2);
zig_i32(-3);
zig_i64(-4);
#ifndef ZIG_NO_I128
{
struct i128 s = {-6};
zig_struct_i128(s);
}
#endif
zig_five_integers(12, 34, 56, 78, 90);
zig_f32(12.34f);
@@ -175,6 +210,7 @@ void run_c_tests(void) {
zig_bool(true);
#ifndef ZIG_NO_COMPLEX
// TODO: Resolve https://github.com/ziglang/zig/issues/8465
//{
// float complex a = 1.25f + I * 2.6f;
@@ -211,23 +247,30 @@ void run_c_tests(void) {
assert_or_panic(creal(z) == 1.5);
assert_or_panic(cimag(z) == 13.5);
}
#endif
#if !defined __mips__ && !defined __riscv
{
struct BigStruct s = {1, 2, 3, 4, 5};
zig_big_struct(s);
}
#endif
#if !defined __i386__ && !defined __arm__ && !defined __mips__ && !defined __riscv
{
struct SmallStructInts s = {1, 2, 3, 4};
zig_small_struct_ints(s);
}
#endif
#ifndef ZIG_NO_I128
{
__int128 s = 0;
s |= 1 << 0;
s |= (__int128)2 << 64;
zig_big_packed_struct(s);
}
#endif
{
uint8_t s = 0;
@@ -238,21 +281,28 @@ void run_c_tests(void) {
zig_small_packed_struct(s);
}
#if !defined __i386__ && !defined __arm__ && !defined __mips__ && !defined __riscv
{
struct SplitStructInts s = {1234, 100, 1337};
zig_split_struct_ints(s);
}
#endif
#if !defined __arm__ && !defined __riscv
{
struct MedStructMixed s = {1234, 100.0f, 1337.0f};
zig_med_struct_mixed(s);
}
#endif
#if !defined __i386__ && !defined __arm__ && !defined __mips__ && !defined __riscv
{
struct SplitStructMixed s = {1234, 100, 1337.0f};
zig_split_struct_mixed(s);
}
#endif
#if !defined __mips__ && !defined __riscv
{
struct BigStruct s = {30, 31, 32, 33, 34};
struct BigStruct res = zig_big_struct_both(s);
@@ -262,25 +312,32 @@ void run_c_tests(void) {
assert_or_panic(res.d == 23);
assert_or_panic(res.e == 24);
}
#endif
#ifndef __riscv
{
struct Rect r1 = {1, 21, 16, 4};
struct Rect r2 = {178, 189, 21, 15};
zig_multiple_struct_ints(r1, r2);
}
#endif
#if !defined __mips__ && !defined __riscv
{
struct FloatRect r1 = {1, 21, 16, 4};
struct FloatRect r2 = {178, 189, 21, 15};
zig_multiple_struct_floats(r1, r2);
}
#endif
{
assert_or_panic(zig_ret_bool() == 1);
assert_or_panic(zig_ret_u8() == 0xff);
assert_or_panic(zig_ret_u16() == 0xffff);
#ifndef __riscv
assert_or_panic(zig_ret_u32() == 0xffffffff);
#endif
assert_or_panic(zig_ret_u64() == 0xffffffffffffffff);
assert_or_panic(zig_ret_i8() == -1);
@@ -306,9 +363,11 @@ void c_u64(uint64_t x) {
assert_or_panic(x == 0xfffffffffffffffcULL);
}
#ifndef ZIG_NO_I128
// Receives a by-value struct wrapping an unsigned __int128 from Zig and
// verifies the payload survived the C ABI (zig side passes 0xfffffffffffffffc).
void c_struct_u128(struct u128 x) {
    assert_or_panic(x.value == 0xfffffffffffffffcULL);
}
#endif
void c_i8(int8_t x) {
assert_or_panic(x == -1);
@@ -326,9 +385,11 @@ void c_i64(int64_t x) {
assert_or_panic(x == -4);
}
#ifndef ZIG_NO_I128
// Receives a by-value struct wrapping a signed __int128 from Zig and
// verifies the payload survived the C ABI (zig side passes -6).
void c_struct_i128(struct i128 x) {
    assert_or_panic(x.value == -6);
}
#endif
void c_f32(float x) {
assert_or_panic(x == 12.34f);
@@ -495,6 +556,7 @@ void c_small_packed_struct(uint8_t x) {
assert_or_panic(((x >> 6) & 0x3) == 3);
}
#ifndef ZIG_NO_I128
__int128 c_ret_big_packed_struct() {
__int128 s = 0;
s |= 1 << 0;
@@ -506,6 +568,7 @@ void c_big_packed_struct(__int128 x) {
assert_or_panic(((x >> 0) & 0xFFFFFFFFFFFFFFFF) == 1);
assert_or_panic(((x >> 64) & 0xFFFFFFFFFFFFFFFF) == 2);
}
#endif
struct SplitStructMixed c_ret_split_struct_mixed() {
struct SplitStructMixed s = {
@@ -596,3 +659,45 @@ int32_t c_ret_i32() {
int64_t c_ret_i64() {
return -1;
}
// Mirrors the Zig `StructWithArray` extern struct: a 4-byte u8 array acting
// as explicit padding between the 32-bit and 64-bit integer members.
typedef struct {
    uint32_t a;
    uint8_t padding[4];
    uint64_t b;
} StructWithArray;
// Called from Zig with {.a = 1, .b = 2}; verifies the non-padding fields
// arrive intact across the C ABI.
void c_struct_with_array(StructWithArray x) {
    assert_or_panic(x.a == 1);
    assert_or_panic(x.b == 2);
}
// Returns {4, zeroed padding, 155} for the Zig side to check.
StructWithArray c_ret_struct_with_array() {
    // `{0}` instead of `{}` for the padding member: empty braced
    // initializers are a C23/GNU extension; `{0}` zero-initializes the
    // array in standard C99/C11 as well, with identical effect.
    return (StructWithArray) { 4, {0}, 155 };
}
// C counterpart of Zig's `FloatArrayStruct`: two nested structs of doubles
// (a "rect-like" shape used to exercise struct-of-float ABI passing).
typedef struct {
    struct Point {
        double x;
        double y;
    } origin;
    struct Size {
        double width;
        double height;
    } size;
} FloatArrayStruct;
// Called from Zig with origin=(5,6) and size=(7,8); verifies every nested
// field survives the C ABI.
void c_float_array_struct(FloatArrayStruct x) {
    assert_or_panic(x.origin.x == 5);
    assert_or_panic(x.origin.y == 6);
    assert_or_panic(x.size.width == 7);
    assert_or_panic(x.size.height == 8);
}
// Returns origin=(1,2) and size=(3,4) for the Zig side to check.
FloatArrayStruct c_ret_float_array_struct() {
    FloatArrayStruct x;
    x.origin.x = 1;
    x.origin.y = 2;
    x.size.width = 3;
    x.size.height = 4;
    return x;
}

View File

@@ -2,6 +2,8 @@ const std = @import("std");
const builtin = @import("builtin");
const print = std.debug.print;
const expect = std.testing.expect;
const has_i128 = builtin.cpu.arch != .i386 and !builtin.cpu.arch.isARM() and
!builtin.cpu.arch.isMIPS();
extern fn run_c_tests() void;
@@ -40,13 +42,13 @@ test "C ABI integers" {
c_u16(0xfffe);
c_u32(0xfffffffd);
c_u64(0xfffffffffffffffc);
c_struct_u128(.{ .value = 0xfffffffffffffffc });
if (has_i128) c_struct_u128(.{ .value = 0xfffffffffffffffc });
c_i8(-1);
c_i16(-2);
c_i32(-3);
c_i64(-4);
c_struct_i128(.{ .value = -6 });
if (has_i128) c_struct_i128(.{ .value = -6 });
c_five_integers(12, 34, 56, 78, 90);
}
@@ -110,7 +112,6 @@ test "C ABI floats" {
}
test "C ABI long double" {
if (!builtin.cpu.arch.isWasm() and !builtin.cpu.arch.isAARCH64()) return error.SkipZigTest;
c_long_double(12.34);
}
@@ -166,8 +167,11 @@ extern fn c_cmultd_comp(a_r: f64, a_i: f64, b_r: f64, b_i: f64) ComplexDouble;
extern fn c_cmultf(a: ComplexFloat, b: ComplexFloat) ComplexFloat;
extern fn c_cmultd(a: ComplexDouble, b: ComplexDouble) ComplexDouble;
const complex_abi_compatible = builtin.cpu.arch != .i386 and !builtin.cpu.arch.isMIPS();
test "C ABI complex float" {
if (true) return error.SkipZigTest; // See https://github.com/ziglang/zig/issues/8465
if (!complex_abi_compatible) return error.SkipZigTest;
if (builtin.cpu.arch == .x86_64) return error.SkipZigTest; // See https://github.com/ziglang/zig/issues/8465
const a = ComplexFloat{ .real = 1.25, .imag = 2.6 };
const b = ComplexFloat{ .real = 11.3, .imag = -1.5 };
@@ -178,6 +182,8 @@ test "C ABI complex float" {
}
test "C ABI complex float by component" {
if (!complex_abi_compatible) return error.SkipZigTest;
const a = ComplexFloat{ .real = 1.25, .imag = 2.6 };
const b = ComplexFloat{ .real = 11.3, .imag = -1.5 };
@@ -187,6 +193,8 @@ test "C ABI complex float by component" {
}
test "C ABI complex double" {
if (!complex_abi_compatible) return error.SkipZigTest;
const a = ComplexDouble{ .real = 1.25, .imag = 2.6 };
const b = ComplexDouble{ .real = 11.3, .imag = -1.5 };
@@ -196,6 +204,8 @@ test "C ABI complex double" {
}
test "C ABI complex double by component" {
if (!complex_abi_compatible) return error.SkipZigTest;
const a = ComplexDouble{ .real = 1.25, .imag = 2.6 };
const b = ComplexDouble{ .real = 11.3, .imag = -1.5 };
@@ -250,6 +260,9 @@ const BigStruct = extern struct {
extern fn c_big_struct(BigStruct) void;
test "C ABI big struct" {
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var s = BigStruct{
.a = 1,
.b = 2,
@@ -274,6 +287,8 @@ const BigUnion = extern union {
extern fn c_big_union(BigUnion) void;
test "C ABI big union" {
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var x = BigUnion{
.a = BigStruct{
.a = 1,
@@ -304,6 +319,11 @@ extern fn c_med_struct_mixed(MedStructMixed) void;
extern fn c_ret_med_struct_mixed() MedStructMixed;
test "C ABI medium struct of ints and floats" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var s = MedStructMixed{
.a = 1234,
.b = 100.0,
@@ -332,6 +352,11 @@ extern fn c_small_struct_ints(SmallStructInts) void;
extern fn c_ret_small_struct_ints() SmallStructInts;
test "C ABI small struct of ints" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var s = SmallStructInts{
.a = 1,
.b = 2,
@@ -392,6 +417,8 @@ export fn zig_big_packed_struct(x: BigPackedStruct) void {
}
test "C ABI big packed struct" {
if (!has_i128) return error.SkipZigTest;
var s = BigPackedStruct{ .a = 1, .b = 2 };
c_big_packed_struct(s);
var s2 = c_ret_big_packed_struct();
@@ -407,6 +434,11 @@ const SplitStructInt = extern struct {
extern fn c_split_struct_ints(SplitStructInt) void;
test "C ABI split struct of ints" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var s = SplitStructInt{
.a = 1234,
.b = 100,
@@ -430,6 +462,11 @@ extern fn c_split_struct_mixed(SplitStructMixed) void;
extern fn c_ret_split_struct_mixed() SplitStructMixed;
test "C ABI split struct of ints and floats" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var s = SplitStructMixed{
.a = 1234,
.b = 100,
@@ -454,6 +491,9 @@ extern fn c_multiple_struct_ints(Rect, Rect) void;
extern fn c_multiple_struct_floats(FloatRect, FloatRect) void;
test "C ABI sret and byval together" {
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var s = BigStruct{
.a = 1,
.b = 2,
@@ -503,6 +543,10 @@ const Vector5 = extern struct {
extern fn c_big_struct_floats(Vector5) void;
test "C ABI structs of floats as parameter" {
if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var v3 = Vector3{
.x = 3.0,
.y = 6.0,
@@ -540,6 +584,8 @@ export fn zig_multiple_struct_ints(x: Rect, y: Rect) void {
}
test "C ABI structs of ints as multiple parameters" {
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var r1 = Rect{
.left = 1,
.right = 21,
@@ -574,6 +620,9 @@ export fn zig_multiple_struct_floats(x: FloatRect, y: FloatRect) void {
}
test "C ABI structs of floats as multiple parameters" {
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
var r1 = FloatRect{
.left = 1,
.right = 21,
@@ -665,3 +714,60 @@ test "C ABI integer return types" {
try expect(c_ret_i32() == -1);
try expect(c_ret_i64() == -1);
}
const StructWithArray = extern struct {
    a: i32,
    padding: [4]u8, // explicit padding between the 32-bit and 64-bit fields
    b: i64,
};
extern fn c_struct_with_array(StructWithArray) void;
extern fn c_ret_struct_with_array() StructWithArray;
test "Struct with array as padding." {
    // Skipped on targets whose backends don't handle this ABI case (per the
    // guards below); presumably TODOs — confirm against backend status.
    if (builtin.cpu.arch == .i386) return error.SkipZigTest;
    if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
    if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
    if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
    c_struct_with_array(.{ .a = 1, .padding = undefined, .b = 2 });
    // The C side returns {4, zeroed padding, 155}.
    var x = c_ret_struct_with_array();
    try std.testing.expect(x.a == 4);
    try std.testing.expect(x.b == 155);
}
// Zig counterpart of the C `FloatArrayStruct`: two nested extern structs of
// f64 pairs, exercising struct-of-float C ABI passing in both directions.
const FloatArrayStruct = extern struct {
    origin: extern struct {
        x: f64,
        y: f64,
    },
    size: extern struct {
        width: f64,
        height: f64,
    },
};
extern fn c_float_array_struct(FloatArrayStruct) void;
extern fn c_ret_float_array_struct() FloatArrayStruct;
test "Float array like struct" {
    // Skipped on MIPS/RISC-V (see guards); ABI case not handled there yet.
    if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
    if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
    c_float_array_struct(.{
        .origin = .{
            .x = 5,
            .y = 6,
        },
        .size = .{
            .width = 7,
            .height = 8,
        },
    });
    // The C side returns origin=(1,2) and size=(3,4).
    var x = c_ret_float_array_struct();
    try std.testing.expect(x.origin.x == 1);
    try std.testing.expect(x.origin.y == 2);
    try std.testing.expect(x.size.width == 3);
    try std.testing.expect(x.size.height == 4);
}

View File

@@ -1,16 +0,0 @@
pub fn main() void {
var x: usize = 3;
const y = add(10, 2, x);
if (y - 6 != 0) unreachable;
}
inline fn add(a: usize, b: usize, c: usize) usize {
if (a == 10) @compileError("bad");
return a + b + c;
}
// error
// output_mode=Exe
//
// :8:18: error: bad
// :3:18: note: called from here

View File

@@ -1,13 +0,0 @@
pub fn main() void {
var x: usize = 3;
const y = add(1, 2, x);
if (y - 6 != 0) unreachable;
}
inline fn add(a: usize, b: usize, c: usize) usize {
if (a == 10) @compileError("bad");
return a + b + c;
}
// run
//

View File

@@ -7,5 +7,5 @@ pub export fn entry() void {
// backend=stage2
// target=native
//
// :3:21: error: expected type '?*anyopaque', found '?usize'
// :3:21: note: optional type child 'usize' cannot cast into optional type child '*anyopaque'
// :3:9: error: expected type '?*anyopaque', found '?usize'
// :3:9: note: optional type child 'usize' cannot cast into optional type child '*anyopaque'

View File

@@ -7,4 +7,4 @@ export fn f() void {
// backend=stage2
// target=native
//
// :3:13: error: cannot assign to constant
// :3:7: error: cannot assign to constant

View File

@@ -7,4 +7,4 @@ export fn f() void {
// backend=stage2
// target=native
//
// :3:13: error: cannot assign to constant
// :3:7: error: cannot assign to constant

View File

@@ -10,4 +10,4 @@ export fn derp() void {
// backend=stage2
// target=native
//
// :6:15: error: cannot assign to constant
// :6:6: error: cannot assign to constant

View File

@@ -75,7 +75,7 @@ export fn entry18() void {
// backend=stage2
// target=native
//
// :3:9: error: cannot assign to constant
// :3:5: error: cannot assign to constant
// :7:7: error: cannot assign to constant
// :11:7: error: cannot assign to constant
// :15:7: error: cannot assign to constant

View File

@@ -1,20 +0,0 @@
export fn entry() void {
bad(bound_fn() == 1237);
}
const SimpleStruct = struct {
field: i32,
fn method(self: *const SimpleStruct) i32 {
return self.field + 3;
}
};
var simple_struct = SimpleStruct{ .field = 1234 };
const bound_fn = simple_struct.method;
fn bad(ok: bool) void {
_ = ok;
}
// error
// target=native
// backend=stage2
//
// :12:18: error: cannot load runtime value in comptime block

View File

@@ -20,5 +20,5 @@ export fn entry1() void {
// backend=stage2
// target=native
//
// :12:14: error: cannot assign to constant
// :16:14: error: cannot assign to constant
// :12:5: error: cannot assign to constant
// :16:5: error: cannot assign to constant

View File

@@ -0,0 +1,9 @@
var cc: @import("std").builtin.CallingConvention = .C;
export fn foo() callconv(cc) void {}
// error
// backend=stage2
// target=native
//
// :2:26: error: unable to resolve comptime value
// :2:26: note: calling convention must be comptime-known

View File

@@ -21,5 +21,5 @@ pub export fn entry() void {
// backend=stage2
// target=native
//
// :13:27: error: store to comptime variable depends on runtime condition
// :13:25: error: store to comptime variable depends on runtime condition
// :11:16: note: runtime condition here

View File

@@ -0,0 +1,14 @@
const Foo = struct {
x: i32,
};
var x: Foo = .{ .x = 2 };
comptime {
x = .{ .x = 3 };
}
// error
// backend=stage2
// target=native
//
// :6:17: error: unable to evaluate comptime expression
// :6:17: note: operation is runtime due to this operand

View File

@@ -14,5 +14,5 @@ export fn entry2() void {
// backend=llvm
// target=native
//
// :2:14: error: cannot load runtime value in comptime block
// :2:14: error: expected type 'f32', found 'f64'
// :9:19: error: expected type 'f32', found 'f64'

View File

@@ -14,5 +14,6 @@ export fn entry() usize { return @sizeOf(@TypeOf(a)); }
// backend=stage2
// target=native
//
// :6:26: error: cannot store to runtime value in comptime block
// :6:24: error: unable to evaluate comptime expression
// :6:5: note: operation is runtime due to this operand
// :4:17: note: called from here

View File

@@ -21,5 +21,6 @@ export fn function_with_return_type_type() void {
// backend=stage2
// target=native
//
// :3:7: error: cannot load runtime value in comptime block
// :3:7: error: unable to evaluate comptime expression
// :3:5: note: operation is runtime due to this operand
// :16:19: note: called from here

View File

@@ -10,5 +10,6 @@ export fn entry() usize { return @offsetOf(Foo, "y"); }
// backend=stage2
// target=native
//
// :5:25: error: cannot load runtime value in comptime block
// :5:18: error: unable to resolve comptime value
// :5:18: note: value being returned at comptime must be comptime-known
// :2:12: note: called from here

View File

@@ -9,4 +9,4 @@ export fn entry() void {
// backend=llvm
// target=native
//
// :2:10: error: cannot assign to constant
// :2:5: error: cannot assign to constant

View File

@@ -23,7 +23,7 @@ export fn qux() void {
// backend=stage2
// target=native
//
// :3:14: error: cannot assign to constant
// :7:13: error: cannot assign to constant
// :11:13: error: cannot assign to constant
// :19:13: error: cannot assign to constant
// :3:8: error: cannot assign to constant
// :7:8: error: cannot assign to constant
// :11:8: error: cannot assign to constant
// :19:8: error: cannot assign to constant

View File

@@ -8,4 +8,4 @@ export fn entry() void { f(); }
// backend=stage2
// target=native
//
// :3:9: error: cannot assign to constant
// :3:5: error: cannot assign to constant

View File

@@ -6,4 +6,5 @@ extern var foo: i32;
// error
//
// :2:15: error: cannot load runtime value in comptime block
// :2:19: error: unable to evaluate comptime expression
// :2:15: note: operation is runtime due to this operand

View File

@@ -1,13 +0,0 @@
pub fn main() void {
const y = fibonacci(7);
if (y - 21 != 0) unreachable;
}
inline fn fibonacci(n: usize) usize {
if (n <= 2) return n;
return fibonacci(n - 2) + fibonacci(n - 1);
}
// run
// target=x86_64-linux,arm-linux,wasm32-wasi
//

View File

@@ -1,20 +0,0 @@
// This additionally tests that the compile error reports the correct source location.
// Without storing source locations relative to the owner decl, the compile error
// here would be off by 2 bytes (from the "7" -> "999").
pub fn main() void {
const y = fibonacci(999);
if (y - 21 != 0) unreachable;
}
inline fn fibonacci(n: usize) usize {
if (n <= 2) return n;
return fibonacci(n - 2) + fibonacci(n - 1);
}
// error
//
// :11:21: error: evaluation exceeded 1000 backwards branches
// :11:21: note: use @setEvalBranchQuota() to raise the branch limit from 1000
// :11:40: note: called from here (6 times)
// :11:21: note: called from here (495 times)
// :5:24: note: called from here

View File

@@ -8,5 +8,5 @@ pub fn main() void {
// output_mode=Exe
// target=x86_64-linux
//
// :4:21: error: store to comptime variable depends on runtime condition
// :4:19: error: store to comptime variable depends on runtime condition
// :4:11: note: runtime condition here

View File

@@ -9,5 +9,5 @@ pub fn main() void {
// error
//
// :6:21: error: store to comptime variable depends on runtime condition
// :6:19: error: store to comptime variable depends on runtime condition
// :4:13: note: runtime condition here

View File

@@ -8,5 +8,5 @@ pub fn main() void {
// output_mode=Exe
// target=x86_64-macos
//
// :4:21: error: store to comptime variable depends on runtime condition
// :4:19: error: store to comptime variable depends on runtime condition
// :4:11: note: runtime condition here

View File

@@ -9,5 +9,5 @@ pub fn main() void {
// error
//
// :6:21: error: store to comptime variable depends on runtime condition
// :6:19: error: store to comptime variable depends on runtime condition
// :4:13: note: runtime condition here

View File

@@ -51,8 +51,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\var y: @import("std").builtin.CallingConvention = .C;
, &.{
":2:22: error: cannot load runtime value in comptime block",
":5:26: error: cannot load runtime value in comptime block",
":2:22: error: expected type 'type', found 'i32'",
":5:26: error: unable to resolve comptime value",
":5:26: note: calling convention must be comptime-known",
});
}