new allocator interface after Andrew Kelley review
@@ -220,7 +220,6 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
}

const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
assert(new_memory.len >= better_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}

@@ -443,7 +442,6 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}

const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
assert(new_memory.len >= better_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}

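Both ArrayList hunks above move ensureCapacity onto reallocAtLeast, which may hand back a longer slice than requested; the list then records whatever length was actually granted in capacity. A minimal usage sketch under that assumption (sizes are arbitrary):

const std = @import("std");

test "ensureCapacity records the granted capacity (sketch)" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();

    try list.ensureCapacity(100);
    // reallocAtLeast may over-allocate, so capacity can exceed the request.
    std.testing.expect(list.capacity >= 100);
}
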
@@ -25,7 +25,7 @@ usingnamespace if (comptime @hasDecl(c, "malloc_size")) struct {
pub const supports_malloc_size = false;
};

-pub const c_allocator = mem.getAllocatorPtr(&c_allocator_state);
+pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
.allocFn = cAlloc,
.resizeFn = cResize,

@@ -38,7 +38,7 @@ fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocato
return ptr[0..len];
}
const full_len = init: {
-if (comptime supports_malloc_size) {
+if (supports_malloc_size) {
const s = malloc_size(ptr);
assert(s >= len);
break :init s;

@@ -56,24 +56,23 @@ fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocato
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
}
-if (comptime supports_malloc_size) {
+if (supports_malloc_size) {
const full_len = malloc_size(buf.ptr);
if (new_len <= full_len) {
return mem.alignAllocLen(full_len, new_len, len_align);
}
}
// TODO: could we still use realloc? are there any cases where we can guarantee that realloc won't move memory?
return error.OutOfMemory;
}

/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (std.Target.current.isWasm())
-mem.getAllocatorPtr(&wasm_page_allocator_state)
+&wasm_page_allocator_state
else if (std.Target.current.os.tag == .freestanding)
root.os.heap.page_allocator
else
-mem.getAllocatorPtr(&page_allocator_state);
+&page_allocator_state;

var page_allocator_state = Allocator{
.allocFn = PageAllocator.alloc,

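Both allocators above stop going through mem.getAllocatorPtr (removed later in this commit) and instead expose the address of a global Allocator state directly. A hedged sketch of that pattern; myAlloc, myResize, and my_allocator are placeholder names, not part of the diff:

const std = @import("std");
const Allocator = std.mem.Allocator;

// Stub callbacks so the pattern is self-contained; a real allocator would
// implement these against its backing storage.
fn myAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
    return error.OutOfMemory;
}
fn myResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
    return error.OutOfMemory;
}

var my_allocator_state = Allocator{
    .allocFn = myAlloc,
    .resizeFn = myResize,
};
// The public handle is simply a pointer to the state; no helper call is needed.
pub const my_allocator = &my_allocator_state;
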
@@ -507,9 +506,9 @@ pub const FixedBufferAllocator = struct {
return sliceContainsSlice(self.buffer, slice);
}

-// NOTE: this will not work in all cases, if the last allocation had an adjusted_index
-// then we won't be able to determine what the last allocation was. This is because
-// the alignForward operation done in alloc is not reverisible.
+/// NOTE: this will not work in all cases, if the last allocation had an adjusted_index
+/// then we won't be able to determine what the last allocation was. This is because
+/// the alignForward operation done in alloc is not reverisible.
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}

@@ -525,7 +524,7 @@ pub const FixedBufferAllocator = struct {
const result = self.buffer[adjusted_index..new_end_index];
self.end_index = new_end_index;

-return result[0..mem.alignAllocLen(result.len, n, len_align)];
+return result;
}

fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {

@@ -544,13 +543,12 @@ pub const FixedBufferAllocator = struct {
return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align);
}

-var add = new_size - buf.len;
+const add = new_size - buf.len;
if (add + self.end_index > self.buffer.len) {
-//add = self.buffer.len - self.end_index;
return error.OutOfMemory;
}
self.end_index += add;
-return mem.alignAllocLen(buf.len + add, new_size, len_align);
+return new_size;
}

pub fn reset(self: *FixedBufferAllocator) void {

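After the review, the grow path above returns new_size directly: resizing the most recent allocation just bumps end_index by the exact difference, so no alignAllocLen adjustment is needed. A small sketch of that in-place grow, assuming the interface in this diff (the buffer size is arbitrary):

const std = @import("std");

test "FixedBufferAllocator grows the last allocation in place (sketch)" {
    var buffer: [64]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;

    var slice = try allocator.alloc(u8, 16);
    // Growing the most recent allocation stays in place and yields exactly new_size bytes.
    slice = try allocator.realloc(slice, 32);
    std.testing.expect(slice.len == 32);
}
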
@@ -735,7 +733,7 @@ test "ArenaAllocator" {

var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
-var fixed_buffer_allocator = mem.sanityWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
+var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));

try testAllocator(&fixed_buffer_allocator.allocator);
try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);

@@ -802,8 +800,8 @@ test "ThreadSafeFixedBufferAllocator" {
}

fn testAllocator(base_allocator: *mem.Allocator) !void {
-var sanityAllocator = mem.sanityWrap(base_allocator);
-const allocator = &sanityAllocator.allocator;
+var validationAllocator = mem.validationWrap(base_allocator);
+const allocator = &validationAllocator.allocator;

var slice = try allocator.alloc(*i32, 100);
testing.expect(slice.len == 100);

@@ -833,8 +831,8 @@ fn testAllocator(base_allocator: *mem.Allocator) !void {
}

fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
-var sanityAllocator = mem.sanityWrap(base_allocator);
-const allocator = &sanityAllocator.allocator;
+var validationAllocator = mem.validationWrap(base_allocator);
+const allocator = &validationAllocator.allocator;

// initial
var slice = try allocator.alignedAlloc(u8, alignment, 10);

@@ -860,8 +858,8 @@ fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29)
}

fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
-var sanityAllocator = mem.sanityWrap(base_allocator);
-const allocator = &sanityAllocator.allocator;
+var validationAllocator = mem.validationWrap(base_allocator);
+const allocator = &validationAllocator.allocator;

//Maybe a platform's page_size is actually the same as or
// very near usize?

@@ -892,8 +890,8 @@ fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Err
}

fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
-var sanityAllocator = mem.sanityWrap(base_allocator);
-const allocator = &sanityAllocator.allocator;
+var validationAllocator = mem.validationWrap(base_allocator);
+const allocator = &validationAllocator.allocator;

var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;

@@ -77,7 +77,7 @@ pub const ArenaAllocator = struct {
}
const result = cur_buf[adjusted_index..new_end_index];
self.state.end_index = new_end_index;
-return result[0..mem.alignAllocLen(result.len, n, len_align)];
+return result;
}
}
};

@@ -70,7 +70,7 @@ test "LoggingAllocator" {
var fbs = std.io.fixedBufferStream(&log_buf);

var allocator_buf: [10]u8 = undefined;
-var fixedBufferAllocator = std.mem.sanityWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
+var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;

var a = try allocator.alloc(u8, 10);

@@ -141,6 +141,9 @@ pub const Allocator = struct {
const new_mem = try self.callAllocFn(new_len, new_alignment, len_align);
@memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len));
// DISABLED TO AVOID BUGS IN TRANSLATE C
// use './zig build test-translate-c' to reproduce, some of the symbols in the
// generated C code will be a sequence of 0xaa (the undefined value), meaning
// it is printing data that has been freed
//@memset(old_mem.ptr, undefined, old_mem.len);
_ = self.shrinkBytes(old_mem, 0, 0);
return new_mem;

@@ -214,6 +217,7 @@ pub const Allocator = struct {
return self.allocWithOptions(Elem, n, null, sentinel);
}

+/// Deprecated: use `allocAdvanced`
pub fn alignedAlloc(
self: *Allocator,
comptime T: type,

@@ -221,11 +225,11 @@ pub const Allocator = struct {
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
-return self.alignedAlloc2(T, alignment, n, .exact);
+return self.allocAdvanced(T, alignment, n, .exact);
}

-const Exact = enum {exact,atLeast};
-pub fn alignedAlloc2(
+const Exact = enum {exact,at_least};
+pub fn allocAdvanced(
self: *Allocator,
comptime T: type,
/// null means naturally aligned

@@ -234,7 +238,7 @@ pub const Allocator = struct {
exact: Exact,
) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
-if (a == @alignOf(T)) return alignedAlloc2(self, T, null, n, exact);
+if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
break :blk a;
} else @alignOf(T);

@@ -248,7 +252,10 @@ pub const Allocator = struct {
// functions that heap-allocate their own frame with @Frame(func).
const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
-assert(if (exact == .exact) byte_slice.len == byte_count else byte_slice.len >= byte_count);
+switch (exact) {
+.exact => assert(byte_slice.len == byte_count),
+.at_least => assert(byte_slice.len >= byte_count),
+}
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
// This if block is a workaround (see comment above)

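allocAdvanced is the renamed alignedAlloc2, and the Exact switch above is what distinguishes the two modes: .exact must return exactly n items, while .at_least may return a longer slice when the allocator has extra usable space. A hedged usage sketch (the FixedBufferAllocator backing is only for illustration):

const std = @import("std");

test "allocAdvanced exact vs at_least (sketch)" {
    var buffer: [256]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;

    // .exact: the slice length equals the request.
    const exact = try allocator.allocAdvanced(u8, null, 100, .exact);
    std.testing.expect(exact.len == 100);

    // .at_least: the slice may be longer than the request.
    const roomy = try allocator.allocAdvanced(u8, null, 100, .at_least);
    std.testing.expect(roomy.len >= 100);
}
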
@@ -273,7 +280,7 @@ pub const Allocator = struct {
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
-return self.alignedRealloc2(old_mem, old_alignment, new_n, .exact);
+return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
}

pub fn reallocAtLeast(self: *Allocator, old_mem: var, new_n: usize) t: {

@@ -281,25 +288,23 @@ pub const Allocator = struct {
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
-return self.alignedRealloc2(old_mem, old_alignment, new_n, .atLeast);
+return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
}

/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
+// Deprecated: use `reallocAdvanced`
pub fn alignedRealloc(
self: *Allocator,
old_mem: var,
comptime new_alignment: u29,
new_n: usize,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
-return self.alignedRealloc2(old_mem, new_alignment, new_n, .exact);
+return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
}

/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
-pub fn alignedRealloc2(
+pub fn reallocAdvanced(
self: *Allocator,
old_mem: var,
comptime new_alignment: u29,

@@ -309,7 +314,7 @@ pub const Allocator = struct {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
-return self.alignedAlloc2(T, new_alignment, new_n, exact);
+return self.allocAdvanced(T, new_alignment, new_n, exact);
}
if (new_n == 0) {
self.free(old_mem);

@@ -392,24 +397,9 @@ pub const Allocator = struct {
}
};

-/// Given a pointer to an allocator, return the *Allocator for it. `allocatorStatePtr` can
-/// either be a `*Allocator`, in which case it is returned as-is, otherwise, the address of
-/// the `allocator` field is returned.
-pub fn getAllocatorPtr(allocatorStatePtr: var) *Allocator {
-// allocator must be a pointer or else this function will return a copy of the allocator which
-// is not what this is for
-const T = @TypeOf(allocatorStatePtr);
-switch (@typeInfo(T)) {
-.Pointer => {},
-else => @compileError("getAllocatorPtr expects a pointer to an allocator but got: " ++ @typeName(T)),
-}
-if (T == *Allocator)
-return allocatorStatePtr;
-return &allocatorStatePtr.allocator;
-}

-/// Detects and asserts if the std.mem.Allocator interface is violated
-pub fn SanityAllocator(comptime T: type) type { return struct {
+/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
+/// or the allocator.
+pub fn ValidationAllocator(comptime T: type) type { return struct {
const Self = @This();
allocator: Allocator,
underlying_allocator: T,

@@ -424,7 +414,8 @@ pub fn SanityAllocator(comptime T: type) type { return struct {
}
fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
if (T == *Allocator) return self.underlying_allocator;
-return getAllocatorPtr(&self.underlying_allocator);
+if (*T == *Allocator) return &self.underlying_allocator;
+return &self.underlying_allocator.allocator;
}
pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
assert(n > 0);

@@ -436,6 +427,7 @@ pub fn SanityAllocator(comptime T: type) type { return struct {

const self = @fieldParentPtr(@This(), "allocator", allocator);
const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align);
+assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
} else {

@@ -467,8 +459,8 @@ pub fn SanityAllocator(comptime T: type) type { return struct {
};
};}

-pub fn sanityWrap(allocator: var) SanityAllocator(@TypeOf(allocator)) {
-return SanityAllocator(@TypeOf(allocator)).init(allocator);
+pub fn validationWrap(allocator: var) ValidationAllocator(@TypeOf(allocator)) {
+return ValidationAllocator(@TypeOf(allocator)).init(allocator);
}

/// An allocator helper function. Adjusts an allocation length satisfy `len_align`.

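validationWrap (previously sanityWrap) wraps an allocator state so that violations of the std.mem.Allocator interface, by the caller or by the allocator, trip asserts; the tests earlier in this diff all route through it. A usage sketch along the same lines as those tests:

const std = @import("std");

test "wrapping an allocator with validationWrap (sketch)" {
    var buffer: [1024]u8 = undefined;
    var validated = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&buffer));
    const allocator = &validated.allocator;

    // Calls go through the wrapper, which asserts alignment and length invariants on both sides.
    const bytes = try allocator.alloc(u8, 10);
    defer allocator.free(bytes);
}
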
@@ -2377,6 +2369,8 @@ test "alignForward" {
testing.expect(alignForward(17, 8) == 24);
}

/// Round an address up to the previous aligned address
/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
if (@popCount(usize, alignment) == 1)
return alignBackward(i, alignment);

@@ -11,7 +11,7 @@ pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.al
pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);

-pub var base_allocator_instance = std.mem.sanityWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]));
+pub var base_allocator_instance = std.mem.validationWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]));
var allocator_mem: [2 * 1024 * 1024]u8 = undefined;

/// This function is intended to be used only in tests. It prints diagnostics to stderr