c: implement @errorName

This commit is contained in:
Jacob Young
2022-10-05 01:29:15 -04:00
parent c8d0e71de6
commit 6f3654ad69
3 changed files with 145 additions and 92 deletions

View File

@@ -1580,6 +1580,66 @@ pub const DeclGen = struct {
}
};
/// Emits the C declarations that back `@errorName` into `o`'s output:
///   1. an anonymous `enum { ... }` giving each error name its integer value,
///   2. one `static` sentinel-terminated byte array per error, named
///      `zig_errorName_<name>`, holding that error name's bytes,
///   3. a `static` table `zig_errorName` of `{pointer, length}` aggregate
///      initializers, indexed by error value, pointing at the arrays from (2).
/// Does nothing when the module's global error set is empty.
pub fn genErrDecls(o: *Object) !void {
    if (o.dg.module.global_error_set.size == 0) return;
    const writer = o.writer();

    // (1) The enum of error values.
    try writer.writeAll("enum {\n");
    o.indent_writer.pushIndent();
    // Track the longest error name so a single buffer can hold any
    // "zig_errorName_<name>\0" identifier built in the loop below.
    var max_name_len: usize = 0;
    for (o.dg.module.error_name_list.items) |name, value| {
        max_name_len = std.math.max(name.len, max_name_len);
        var err_val_payload = Value.Payload.Error{ .data = .{ .name = name } };
        try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_val_payload.base), .Other);
        try writer.print(" = {d}u,\n", .{value});
    }
    o.indent_writer.popIndent();
    try writer.writeAll("};\n");

    // (2) One static, NUL-terminated char array per error name.
    const name_prefix = "zig_errorName_";
    // +1 leaves room for the 0 sentinel after the longest name.
    const name_buf = try o.dg.gpa.alloc(u8, name_prefix.len + max_name_len + 1);
    defer o.dg.gpa.free(name_buf);
    std.mem.copy(u8, name_buf, name_prefix);
    for (o.dg.module.error_name_list.items) |name| {
        std.mem.copy(u8, name_buf[name_prefix.len..], name);
        name_buf[name_prefix.len + name.len] = 0;
        // Full "zig_errorName_<name>" identifier as a sentinel slice…
        const identifier = name_buf[0 .. name_prefix.len + name.len :0];
        // …and just "<name>" (still NUL-terminated) for the array contents.
        const nameZ = identifier[name_prefix.len..];

        // Stack-construct a `[name.len:0]u8` Type and a `.bytes` Value so the
        // regular renderTypeAndName/renderValue machinery can emit the decl.
        var name_ty_payload = Type.Payload.Len{
            .base = .{ .tag = .array_u8_sentinel_0 },
            .data = name.len,
        };
        const name_ty = Type.initPayload(&name_ty_payload.base);
        var name_val_payload = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = nameZ };
        const name_val = Value.initPayload(&name_val_payload.base);

        try writer.writeAll("static ");
        try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .Const, 0);
        try writer.writeAll(" = ");
        try o.dg.renderValue(writer, name_ty, name_val, .Other);
        try writer.writeAll(";\n");
    }

    // (3) The lookup table: an array of sentinel-terminated u8 slices, one
    // entry per error, rendered as `{zig_errorName_<name>, <len>u}` pairs.
    var name_array_ty_payload = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{
        .len = o.dg.module.error_name_list.items.len,
        .elem_type = Type.initTag(.const_slice_u8_sentinel_0),
    } };
    const name_array_ty = Type.initPayload(&name_array_ty_payload.base);

    try writer.writeAll("static ");
    try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = "zig_errorName" }, .Const, 0);
    try writer.writeAll(" = {");
    for (o.dg.module.error_name_list.items) |name, value| {
        // Comma-separate every entry after the first (error value 0 is first).
        if (value != 0) try writer.writeByte(',');
        try writer.print("{{zig_errorName_{}, {d}u}}", .{ fmtIdent(name), name.len });
    }
    try writer.writeAll("};\n");
}
pub fn genFunc(f: *Function) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -3982,11 +4042,10 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(un_op);
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
_ = operand;
_ = local;
return f.fail("TODO: C backend: implement airErrorName", .{});
try writer.writeAll(" = zig_errorName[");
try f.writeCValue(writer, operand);
try writer.writeAll("];\n");
return local;
}
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {

View File

@@ -260,30 +260,15 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
var f: Flush = .{};
defer f.deinit(gpa);
// Covers zig.h and err_typedef_item.
// Covers zig.h and typedef.
try f.all_buffers.ensureUnusedCapacity(gpa, 2);
if (zig_h.len != 0) {
f.all_buffers.appendAssumeCapacity(.{
.iov_base = zig_h,
.iov_len = zig_h.len,
});
f.file_size += zig_h.len;
}
f.appendBufAssumeCapacity(zig_h);
const err_typedef_writer = f.err_typedef_buf.writer(gpa);
const err_typedef_index = f.all_buffers.items.len;
const typedef_index = f.all_buffers.items.len;
f.all_buffers.items.len += 1;
if (module.global_error_set.size > 0) {
try err_typedef_writer.writeAll("enum {\n");
var it = module.global_error_set.iterator();
while (it.next()) |entry| try err_typedef_writer.print(" zig_error_{s} = {d},\n", .{
codegen.fmtIdent(entry.key_ptr.*),
entry.value_ptr.*,
});
try err_typedef_writer.writeAll("};\n");
}
try self.flushErrDecls(&f);
// Typedefs, forward decls, and non-functions first.
// Unlike other backends, the .c code we are emitting is order-dependent. Therefore
@@ -301,38 +286,20 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
while (f.remaining_decls.popOrNull()) |kv| {
const decl_index = kv.key;
try flushDecl(self, &f, decl_index);
try self.flushDecl(&f, decl_index);
}
if (f.err_typedef_buf.items.len == 0) {
f.all_buffers.items[err_typedef_index] = .{
.iov_base = "",
.iov_len = 0,
};
} else {
f.all_buffers.items[err_typedef_index] = .{
.iov_base = f.err_typedef_buf.items.ptr,
.iov_len = f.err_typedef_buf.items.len,
};
f.file_size += f.err_typedef_buf.items.len;
}
f.all_buffers.items[typedef_index] = .{
.iov_base = if (f.typedef_buf.items.len > 0) f.typedef_buf.items.ptr else "",
.iov_len = f.typedef_buf.items.len,
};
f.file_size += f.typedef_buf.items.len;
// Now the function bodies.
try f.all_buffers.ensureUnusedCapacity(gpa, f.fn_count);
for (decl_keys) |decl_index, i| {
const decl = module.declPtr(decl_index);
if (decl.getFunction() != null) {
const decl_block = &decl_values[i];
const buf = decl_block.code.items;
if (buf.len != 0) {
f.all_buffers.appendAssumeCapacity(.{
.iov_base = buf.ptr,
.iov_len = buf.len,
});
f.file_size += buf.len;
}
}
}
for (decl_keys) |decl_index, i|
if (module.declPtr(decl_index).getFunction() != null)
f.appendBufAssumeCapacity(decl_values[i].code.items);
const file = self.base.file.?;
try file.setEndPos(f.file_size);
@@ -342,7 +309,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
const Flush = struct {
remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{},
typedefs: Typedefs = .{},
err_typedef_buf: std.ArrayListUnmanaged(u8) = .{},
typedef_buf: std.ArrayListUnmanaged(u8) = .{},
err_buf: std.ArrayListUnmanaged(u8) = .{},
/// We collect a list of buffers to write, and write them all at once with pwritev 😎
all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{},
/// Keeps track of the total bytes of `all_buffers`.
@@ -356,9 +324,16 @@ const Flush = struct {
std.hash_map.default_max_load_percentage,
);
/// Queues `buf` as one iovec in `f.all_buffers` and grows `f.file_size`
/// accordingly. Empty buffers are dropped rather than queued. The caller
/// must have reserved capacity in `all_buffers` beforehand.
fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void {
    const len = buf.len;
    if (len != 0) {
        f.all_buffers.appendAssumeCapacity(.{ .iov_base = buf.ptr, .iov_len = len });
        f.file_size += len;
    }
}
fn deinit(f: *Flush, gpa: Allocator) void {
f.all_buffers.deinit(gpa);
f.err_typedef_buf.deinit(gpa);
f.err_buf.deinit(gpa);
f.typedef_buf.deinit(gpa);
f.typedefs.deinit(gpa);
f.remaining_decls.deinit(gpa);
}
@@ -368,6 +343,57 @@ const FlushDeclError = error{
OutOfMemory,
};
/// Merges `typedefs` into the flush-wide dedup set `f.typedefs`, appending
/// the rendered C text of each typedef not seen before to `f.typedef_buf`.
/// Typedefs already present in `f.typedefs` are skipped, so each typedef is
/// emitted at most once per flush.
fn flushTypedefs(self: *C, f: *Flush, typedefs: codegen.TypedefMap.Unmanaged) FlushDeclError!void {
    if (typedefs.count() == 0) return;

    const gpa = self.base.allocator;
    const module = self.base.options.module.?;

    // Reserve space up front so getOrPutAssumeCapacityContext cannot fail.
    try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, typedefs.count()), .{
        .mod = module,
    });

    var iter = typedefs.iterator();
    while (iter.next()) |entry| {
        const gop = f.typedefs.getOrPutAssumeCapacityContext(entry.key_ptr.*, .{
            .mod = module,
        });
        if (gop.found_existing) continue;
        // First occurrence of this typedef: emit its rendered source.
        try f.typedef_buf.appendSlice(gpa, entry.value_ptr.rendered);
    }
}
/// Renders the `@errorName` support declarations (see `codegen.genErrDecls`)
/// into `f.err_buf` and queues that buffer for the pwritev of the flush.
/// A temporary `codegen.Object` is wrapped around `f.err_buf` so the regular
/// DeclGen rendering machinery can be reused outside of any Decl.
fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void {
    const gpa = self.base.allocator;
    const module = self.base.options.module.?;
    var object = codegen.Object{
        .dg = .{
            .gpa = gpa,
            .module = module,
            .error_msg = null,
            // No Decl is associated with this output; genErrDecls is expected
            // not to read these fields — NOTE(review): confirm in codegen.zig.
            .decl_index = undefined,
            .decl = undefined,
            .fwd_decl = undefined,
            .typedefs = codegen.TypedefMap.initContext(gpa, .{ .mod = module }),
            .typedefs_arena = gpa,
        },
        .code = f.err_buf.toManaged(gpa),
        .indent_writer = undefined, // set later so we can get a pointer to object.code
    };
    object.indent_writer = .{ .underlying_writer = object.code.writer() };
    // Defers run LIFO: the generated code is moved back into f.err_buf first,
    // then the temporary typedefs map is freed.
    defer object.dg.typedefs.deinit();
    defer f.err_buf = object.code.moveToUnmanaged();
    codegen.genErrDecls(&object) catch |err| switch (err) {
        // There is no Decl being analyzed here, so analysis cannot fail.
        error.AnalysisFail => unreachable,
        else => |e| return e,
    };
    // Dedup any typedefs the error decls produced, then queue the output.
    try self.flushTypedefs(f, object.dg.typedefs.unmanaged);
    try f.all_buffers.ensureUnusedCapacity(gpa, 1);
    f.appendBufAssumeCapacity(object.code.items);
}
/// Assumes `decl` was in the `remaining_decls` set, and has already been removed.
fn flushDecl(self: *C, f: *Flush, decl_index: Module.Decl.Index) FlushDeclError!void {
const module = self.base.options.module.?;
@@ -384,43 +410,13 @@ fn flushDecl(self: *C, f: *Flush, decl_index: Module.Decl.Index) FlushDeclError!
const decl_block = self.decl_table.getPtr(decl_index).?;
const gpa = self.base.allocator;
if (decl_block.typedefs.count() != 0) {
try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, decl_block.typedefs.count()), .{
.mod = module,
});
var it = decl_block.typedefs.iterator();
while (it.next()) |new| {
const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{
.mod = module,
});
if (!gop.found_existing) {
try f.err_typedef_buf.appendSlice(gpa, new.value_ptr.rendered);
}
}
}
if (decl_block.fwd_decl.items.len != 0) {
const buf = decl_block.fwd_decl.items;
if (buf.len != 0) {
try f.all_buffers.append(gpa, .{
.iov_base = buf.ptr,
.iov_len = buf.len,
});
f.file_size += buf.len;
}
}
if (decl.getFunction() != null) {
f.fn_count += 1;
} else if (decl_block.code.items.len != 0) {
const buf = decl_block.code.items;
if (buf.len != 0) {
try f.all_buffers.append(gpa, .{
.iov_base = buf.ptr,
.iov_len = buf.len,
});
f.file_size += buf.len;
}
}
try self.flushTypedefs(f, decl_block.typedefs);
try f.all_buffers.ensureUnusedCapacity(gpa, 2);
f.appendBufAssumeCapacity(decl_block.fwd_decl.items);
if (decl.getFunction()) |_|
f.fn_count += 1
else
f.appendBufAssumeCapacity(decl_block.code.items);
}
pub fn flushEmitH(module: *Module) !void {

View File

@@ -556,7 +556,6 @@ test "error union comptime caching" {
}
test "@errorName" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -570,7 +569,6 @@ fn gimmeItBroke() anyerror {
}
test "@errorName sentinel length matches slice length" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;