allocgate: utilize a *const vtable field
@@ -154,8 +154,11 @@ const CAllocator = struct {
 /// `malloc`/`free`, see `raw_c_allocator`.
 pub const c_allocator = Allocator{
     .ptr = undefined,
-    .allocFn = CAllocator.alloc,
-    .resizeFn = CAllocator.resize,
+    .vtable = &c_allocator_vtable,
 };
+const c_allocator_vtable = Allocator.VTable{
+    .alloc = CAllocator.alloc,
+    .resize = CAllocator.resize,
+};
 
 /// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls

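The hunk above shows the allocgate migration pattern for stateless allocators: `ptr` stays `undefined` (it is never dereferenced) and `vtable` points at a statically-stored `Allocator.VTable` of plain function pointers. A minimal sketch of the same wiring for a third-party allocator follows; `my_allocator`, `myAlloc`, and `myResize` are hypothetical names, not part of this commit, and the stub bodies exist only to keep the sketch compilable.

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // Stub backend: always reports OutOfMemory on alloc.
    fn myAlloc(_: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        _ = len;
        _ = ptr_align;
        _ = len_align;
        _ = ret_addr;
        return error.OutOfMemory;
    }

    // Contract-conforming stub: shrinks succeed in place, only growth fails.
    fn myResize(_: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        _ = buf_align;
        _ = len_align;
        _ = ret_addr;
        if (new_len > buf.len) return error.OutOfMemory;
        return new_len;
    }

    pub const my_allocator = Allocator{
        .ptr = undefined, // stateless, so the type-erased pointer is unused
        .vtable = &my_allocator_vtable,
    };

    const my_allocator_vtable = Allocator.VTable{
        .alloc = myAlloc,
        .resize = myResize,
    };
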
@@ -165,8 +168,11 @@ pub const c_allocator = Allocator{
 /// than `c_allocator`.
 pub const raw_c_allocator = Allocator{
     .ptr = undefined,
-    .allocFn = rawCAlloc,
-    .resizeFn = rawCResize,
+    .vtable = &raw_c_allocator_vtable,
 };
+const raw_c_allocator_vtable = Allocator.VTable{
+    .alloc = rawCAlloc,
+    .resize = rawCResize,
+};
 
 fn rawCAlloc(

@@ -208,16 +214,14 @@ fn rawCResize(
 pub const page_allocator = if (builtin.target.isWasm())
     Allocator{
         .ptr = undefined,
-        .allocFn = WasmPageAllocator.alloc,
-        .resizeFn = WasmPageAllocator.resize,
+        .vtable = &WasmPageAllocator.vtable,
     }
 else if (builtin.target.os.tag == .freestanding)
     root.os.heap.page_allocator
 else
     Allocator{
         .ptr = undefined,
-        .allocFn = PageAllocator.alloc,
-        .resizeFn = PageAllocator.resize,
+        .vtable = &PageAllocator.vtable,
     };
 
 /// Verifies that the adjusted length will still map to the full length

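On freestanding targets this hunk keeps resolving `page_allocator` from the application's root source file via `root.os.heap.page_allocator`, so such a project must now expose an `Allocator` of the new two-field shape there. A sketch of what that declaration could look like; `kernelPageAlloc` and `kernelPageResize` are hypothetical stubs standing in for real paging code.

    // Root source file of a freestanding build (sketch only).
    const std = @import("std");
    const Allocator = std.mem.Allocator;

    fn kernelPageAlloc(_: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        _ = len;
        _ = ptr_align;
        _ = len_align;
        _ = ret_addr;
        return error.OutOfMemory; // real code would hand out pages here
    }

    fn kernelPageResize(_: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        _ = buf_align;
        _ = len_align;
        _ = ret_addr;
        if (new_len > buf.len) return error.OutOfMemory;
        return new_len;
    }

    const kernel_page_vtable = Allocator.VTable{
        .alloc = kernelPageAlloc,
        .resize = kernelPageResize,
    };

    pub const os = struct {
        pub const heap = struct {
            pub const page_allocator = Allocator{
                .ptr = undefined,
                .vtable = &kernel_page_vtable,
            };
        };
    };
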
@@ -231,6 +235,11 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
 pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
 
 const PageAllocator = struct {
+    const vtable = Allocator.VTable{
+        .alloc = alloc,
+        .resize = resize,
+    };
+
     fn alloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
         _ = ra;
         assert(n > 0);

@@ -400,6 +409,11 @@ const WasmPageAllocator = struct {
         }
     }
 
+    const vtable = Allocator.VTable{
+        .alloc = alloc,
+        .resize = resize,
+    };
+
     const PageStatus = enum(u1) {
         used = 0,
         free = 1,

@@ -807,7 +821,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             return_address: usize,
         ) error{OutOfMemory}![]u8 {
             return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
-                return self.fallback_allocator.allocFn(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
+                return self.fallback_allocator.vtable.alloc(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
         }
 
         fn resize(

@@ -821,7 +835,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                 return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
             } else {
-                return self.fallback_allocator.resizeFn(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
+                return self.fallback_allocator.vtable.resize(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
             }
         }
     };

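Note that only call sites which invoked the bare function-pointer fields (`allocFn`/`resizeFn`) need this mechanical rewrite; the public wrapper methods on `Allocator` dispatch through the vtable internally, so user code is unaffected. A small sanity check using only the public `alloc`/`free` API:

    const std = @import("std");

    test "caller-facing API unchanged by allocgate" {
        const a = std.heap.page_allocator;
        const buf = try a.alloc(u8, 100);
        defer a.free(buf);
    }
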
@@ -47,7 +47,7 @@ pub const ArenaAllocator = struct {
         const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
         const big_enough_len = prev_len + actual_min_size;
         const len = big_enough_len + big_enough_len / 2;
-        const buf = try self.child_allocator.allocFn(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
+        const buf = try self.child_allocator.vtable.alloc(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
         const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
         buf_node.* = BufNode{
             .data = buf,

@@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             var it = self.large_allocations.iterator();
             while (it.next()) |large| {
                 if (large.value_ptr.freed) {
-                    _ = self.backing_allocator.resizeFn(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
+                    _ = self.backing_allocator.vtable.resize(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
                 }
             }
         }

@@ -29,7 +29,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             ra: usize,
         ) error{OutOfMemory}![]u8 {
             self.writer.print("alloc : {}", .{len}) catch {};
-            const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
+            const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
             if (result) |_| {
                 self.writer.print(" success!\n", .{}) catch {};
             } else |_| {

@@ -53,7 +53,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             } else {
                 self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
             }
-            if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+            if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
                 if (new_len > buf.len) {
                     self.writer.print(" success!\n", .{}) catch {};
                 }

@@ -53,7 +53,7 @@ pub fn ScopedLoggingAllocator(
             len_align: u29,
             ra: usize,
         ) error{OutOfMemory}![]u8 {
-            const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
+            const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
             if (result) |_| {
                 logHelper(
                     success_log_level,

@@ -78,7 +78,7 @@ pub fn ScopedLoggingAllocator(
             len_align: u29,
             ra: usize,
         ) error{OutOfMemory}!usize {
-            if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+            if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
                 if (new_len == 0) {
                     logHelper(success_log_level, "free - success - len: {}", .{buf.len});
                 } else if (new_len <= buf.len) {

@@ -70,7 +70,7 @@ pub fn ValidationAllocator(comptime T: type) type {
             }
 
             const underlying = self.getUnderlyingAllocatorPtr();
-            const result = try underlying.allocFn(underlying.ptr, n, ptr_align, len_align, ret_addr);
+            const result = try underlying.vtable.alloc(underlying.ptr, n, ptr_align, len_align, ret_addr);
             assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
             if (len_align == 0) {
                 assert(result.len == n);

@@ -95,7 +95,7 @@ pub fn ValidationAllocator(comptime T: type) type {
                 assert(new_len >= len_align);
             }
             const underlying = self.getUnderlyingAllocatorPtr();
-            const result = try underlying.resizeFn(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
+            const result = try underlying.vtable.resize(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
             if (len_align == 0) {
                 assert(result == new_len);
             } else {

@@ -131,10 +131,14 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
     return adjusted;
 }
 
-const failAllocator = Allocator{
+const fail_allocator = Allocator{
     .ptr = undefined,
-    .allocFn = failAllocatorAlloc,
-    .resizeFn = Allocator.NoResize(c_void).noResize,
+    .vtable = &failAllocator_vtable,
 };
 
+const failAllocator_vtable = Allocator.VTable{
+    .alloc = failAllocatorAlloc,
+    .resize = Allocator.NoResize(c_void).noResize,
+};
+
 fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {

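`Allocator.NoResize(c_void).noResize`, carried over into the new vtable here, is the stdlib's stock stub `resize` for allocators that never resize in place. Any custom vtable can reuse it; a sketch follows, where `myAlloc` is a hypothetical alloc function with the `VTable` signature, stubbed only to keep the sketch self-contained.

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // Hypothetical alloc function; a real one would hand out memory.
    fn myAlloc(_: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        _ = len;
        _ = ptr_align;
        _ = len_align;
        _ = ret_addr;
        return error.OutOfMemory;
    }

    // Reuse the stock stub instead of hand-writing a resize function.
    const my_vtable = Allocator.VTable{
        .alloc = myAlloc,
        .resize = Allocator.NoResize(c_void).noResize,
    };
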
@@ -146,8 +150,8 @@ fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra:
 }
 
 test "mem.Allocator basics" {
-    try testing.expectError(error.OutOfMemory, failAllocator.alloc(u8, 1));
-    try testing.expectError(error.OutOfMemory, failAllocator.allocSentinel(u8, 1, 0));
+    try testing.expectError(error.OutOfMemory, fail_allocator.alloc(u8, 1));
+    try testing.expectError(error.OutOfMemory, fail_allocator.allocSentinel(u8, 1, 0));
 }
 
 test "Allocator.resize" {

@@ -10,39 +10,42 @@ pub const Error = error{OutOfMemory};
 
 // The type erased pointer to the allocator implementation
 ptr: *c_void,
+vtable: *const VTable,
 
-/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
-///
-/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
-/// otherwise, the length must be aligned to `len_align`.
-///
-/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
-///
-/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
-/// If the value is `0` it means no return address has been provided.
-allocFn: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
+pub const VTable = struct {
+    /// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
+    ///
+    /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
+    /// otherwise, the length must be aligned to `len_align`.
+    ///
+    /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+    ///
+    /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
+    /// If the value is `0` it means no return address has been provided.
+    alloc: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
 
-/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
-/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
-/// that was passed as the `ptr_align` parameter to the original `allocFn` call.
-///
-/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
-/// longer be passed to `resizeFn`.
-///
-/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
-/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
-/// unmodified and error.OutOfMemory MUST be returned.
-///
-/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
-/// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
-/// provide a way to modify the alignment of a pointer. Rather it provides an API for
-/// accepting more bytes of memory from the allocator than requested.
-///
-/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
-///
-/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
-/// If the value is `0` it means no return address has been provided.
-resizeFn: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+    /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
+    /// length returned by `alloc` or `resize`. `buf_align` must equal the same value
+    /// that was passed as the `ptr_align` parameter to the original `alloc` call.
+    ///
+    /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
+    /// longer be passed to `resize`.
+    ///
+    /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
+    /// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
+    /// unmodified and error.OutOfMemory MUST be returned.
+    ///
+    /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
+    /// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
+    /// provide a way to modify the alignment of a pointer. Rather it provides an API for
+    /// accepting more bytes of memory from the allocator than requested.
+    ///
+    /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+    ///
+    /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
+    /// If the value is `0` it means no return address has been provided.
+    resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+};
 
 pub fn init(
     pointer: anytype,

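The contract above is easiest to read from the implementer's side. Below is a minimal sketch of a conforming implementation, assuming only what these doc comments specify; `OneShot` is an illustrative name, not part of this commit. It is a bump allocator over a fixed buffer whose `resize` obeys the rules that shrinks (including `new_len == 0`, i.e. free) must succeed in place and only growth may return `error.OutOfMemory`.

    const std = @import("std");
    const mem = std.mem;
    const Allocator = mem.Allocator;

    const OneShot = struct {
        buffer: [1024]u8 = undefined,
        end: usize = 0,

        const vtable = Allocator.VTable{
            .alloc = alloc,
            .resize = resize,
        };

        fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
            _ = len_align;
            _ = ret_addr;
            const self = @ptrCast(*OneShot, @alignCast(@alignOf(OneShot), ptr));
            const base = @ptrToInt(&self.buffer);
            const start = mem.alignForward(base + self.end, ptr_align) - base;
            if (start + len > self.buffer.len) return error.OutOfMemory;
            self.end = start + len;
            // The caller guarantees `len` is aligned by `len_align`, so
            // returning exactly `len` bytes satisfies both len_align cases.
            return self.buffer[start .. start + len];
        }

        fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
            _ = ptr;
            _ = buf_align;
            _ = len_align;
            _ = ret_addr;
            // Growth is unsupported here; the allocation stays unmodified,
            // as the contract requires when returning OutOfMemory.
            if (new_len > buf.len) return error.OutOfMemory;
            return new_len; // shrinks and frees (new_len == 0) succeed in place
        }
    };

    test "OneShot conforms to the public API" {
        var state = OneShot{};
        const a = Allocator{ .ptr = &state, .vtable = &OneShot.vtable };
        const buf = try a.alloc(u8, 16);
        a.free(buf);
    }
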
@@ -64,11 +67,14 @@ pub fn init(
             return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr);
         }
     };
+    const vtable = VTable{
+        .alloc = gen.alloc,
+        .resize = gen.resize,
+    };
 
     return .{
         .ptr = pointer,
-        .allocFn = gen.alloc,
-        .resizeFn = gen.resize,
+        .vtable = &vtable,
     };
 }
 

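Because `allocFn`/`resizeFn` are comptime parameters, the `vtable` built here is comptime-known, so taking its address yields a pointer with static lifetime and the returned `Allocator` remains a two-word handle. From the implementer's side, `init` is used as before. A sketch, assuming the `init(pointer, allocFn, resizeFn)` shape shown above; `CountingAllocator` is illustrative, not part of this commit.

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // Wrapper that counts allocations, forwarding through the parent's
    // vtable exactly like the stdlib wrapper allocators in this commit.
    const CountingAllocator = struct {
        parent: Allocator,
        count: usize = 0,

        fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
            self.count += 1;
            return self.parent.vtable.alloc(self.parent.ptr, len, ptr_align, len_align, ret_addr);
        }

        fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
            return self.parent.vtable.resize(self.parent.ptr, buf, buf_align, new_len, len_align, ret_addr);
        }

        // `init` generates the *c_void shims and the static vtable for us.
        fn allocator(self: *CountingAllocator) Allocator {
            return Allocator.init(self, alloc, resize);
        }
    };

    test "counting wrapper via Allocator.init" {
        var counting = CountingAllocator{ .parent = std.heap.page_allocator };
        const a = counting.allocator();
        const buf = try a.alloc(u8, 8);
        a.free(buf);
        try std.testing.expect(counting.count == 1);
    }
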
@@ -141,7 +147,7 @@ fn reallocBytes(
     return_address: usize,
 ) Error![]u8 {
     if (old_mem.len == 0) {
-        const new_mem = try self.allocFn(self.ptr, new_byte_count, new_alignment, len_align, return_address);
+        const new_mem = try self.vtable.alloc(self.ptr, new_byte_count, new_alignment, len_align, return_address);
         // TODO: https://github.com/ziglang/zig/issues/4298
         @memset(new_mem.ptr, undefined, new_byte_count);
         return new_mem;

@@ -152,7 +158,7 @@ fn reallocBytes(
         const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
         return old_mem.ptr[0..shrunk_len];
     }
-    if (self.resizeFn(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
+    if (self.vtable.resize(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
         assert(resized_len >= new_byte_count);
         // TODO: https://github.com/ziglang/zig/issues/4298
         @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);

@@ -178,7 +184,7 @@ fn moveBytes(
 ) Error![]u8 {
     assert(old_mem.len > 0);
     assert(new_len > 0);
-    const new_mem = try self.allocFn(self.ptr, new_len, new_alignment, len_align, return_address);
+    const new_mem = try self.vtable.alloc(self.ptr, new_len, new_alignment, len_align, return_address);
     @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
     // TODO https://github.com/ziglang/zig/issues/4298
     @memset(old_mem.ptr, undefined, old_mem.len);

@@ -320,7 +326,7 @@ pub fn allocAdvancedWithRetAddr(
         .exact => 0,
         .at_least => size_of_T,
     };
-    const byte_slice = try self.allocFn(self.ptr, byte_count, a, len_align, return_address);
+    const byte_slice = try self.vtable.alloc(self.ptr, byte_count, a, len_align, return_address);
     switch (exact) {
         .exact => assert(byte_slice.len == byte_count),
         .at_least => assert(byte_slice.len >= byte_count),

@@ -345,7 +351,7 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old
     }
     const old_byte_slice = mem.sliceAsBytes(old_mem);
     const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
-    const rc = try self.resizeFn(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
+    const rc = try self.vtable.resize(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
     assert(rc == new_byte_count);
     const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
     return mem.bytesAsSlice(T, new_byte_slice);

@@ -514,5 +520,5 @@ pub fn shrinkBytes(
     return_address: usize,
 ) usize {
     assert(new_len <= buf.len);
-    return self.resizeFn(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
+    return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
 }

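The `catch unreachable` is licensed by the `VTable` contract: `error.OutOfMemory` may only be returned when `new_len > buf.len`, and the assert above rules that case out, so a shrink through the vtable is infallible. A sketch of the same reasoning at a raw call site:

    const std = @import("std");

    test "shrinking through the vtable cannot fail" {
        const a = std.heap.page_allocator;
        var buf = try a.alloc(u8, 128);
        // buf_align matches the original ptr_align (@alignOf(u8) == 1) and
        // new_len < buf.len, so the contract forbids an error here.
        const new_len = a.vtable.resize(a.ptr, buf, 1, 64, 0, @returnAddress()) catch unreachable;
        buf = buf[0..new_len];
        a.free(buf);
    }
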
@@ -54,7 +54,7 @@ pub const FailingAllocator = struct {
         if (self.index == self.fail_index) {
             return error.OutOfMemory;
         }
-        const result = try self.internal_allocator.allocFn(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
+        const result = try self.internal_allocator.vtable.alloc(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
         self.allocated_bytes += result.len;
         self.allocations += 1;
         self.index += 1;

@@ -69,7 +69,7 @@ pub const FailingAllocator = struct {
         len_align: u29,
         ra: usize,
     ) error{OutOfMemory}!usize {
-        const r = self.internal_allocator.resizeFn(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
+        const r = self.internal_allocator.vtable.resize(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
             std.debug.assert(new_len > old_mem.len);
             return e;
         };