Revert "Smaller memory footprint for BoundedArray (#16299)"

This reverts commit cb5a6be41a.

I deeply apologize for the churn.

This change is problematic given that we do not have ranged integers
(yet? see #3806).

In the meantime, this type needs to be `usize`, matching the length and
index types for all std lib data structures.

Users who want to save memory should not use heap-allocated BoundedArray
values, since the type is inherently memory-inefficient. Use a different
memory layout instead.
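
As a rough illustration of that tradeoff, here is a sketch (not part of
this commit) of what the footprint looks like for a tiny capacity; the
4-byte figure comes from the test deleted below, and the 16-byte figure
assumes a 64-bit target:

    const std = @import("std");

    test "BoundedArray footprint, illustrative only" {
        // Size is target-dependent; only check on x86_64, like the deleted test.
        if (@import("builtin").cpu.arch != .x86_64) return;
        // With `len: usize`, [3]u8 plus an 8-byte length pads out to 16 bytes.
        try std.testing.expectEqual(@sizeOf(std.BoundedArray(u8, 3)), 16);
        // The reverted change packed `len` into a u2, shrinking this to 4 bytes.
    }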

If #3806 is accepted and implemented, the length value can become an
integer with the appropriate range, without the footgun. If that
proposal is not accepted, the `len` type will remain `usize`.
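
For reference, a minimal sketch of the std.math.IntFittingRange
behavior the reverted commit relied on for its `len` field; the
capacities mirror the "BoundedArray sizeOf" test deleted below:

    const std = @import("std");

    test "IntFittingRange picks the smallest length type, illustrative only" {
        // 15 fits in 4 bits; 16 needs 5.
        try std.testing.expectEqual(u4, std.math.IntFittingRange(0, 15));
        try std.testing.expectEqual(u5, std.math.IntFittingRange(0, 16));
    }
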
Andrew Kelley 2024-08-23 22:30:10 -07:00
parent d9e8671d96
commit 85747b266a


@@ -39,16 +39,14 @@ pub fn BoundedArrayAligned(
 ) type {
     return struct {
         const Self = @This();
-        const Len = std.math.IntFittingRange(0, buffer_capacity);
         buffer: [buffer_capacity]T align(alignment) = undefined,
-        len: Len = 0,
+        len: usize = 0,
         /// Set the actual length of the slice.
         /// Returns error.Overflow if it exceeds the length of the backing array.
         pub fn init(len: usize) error{Overflow}!Self {
             if (len > buffer_capacity) return error.Overflow;
-            return Self{ .len = @intCast(len) };
+            return Self{ .len = len };
         }
         /// View the internal array as a slice whose size was previously set.
@@ -69,7 +67,7 @@ pub fn BoundedArrayAligned(
         /// Does not initialize added items if any.
         pub fn resize(self: *Self, len: usize) error{Overflow}!void {
             if (len > buffer_capacity) return error.Overflow;
-            self.len = @intCast(len);
+            self.len = len;
         }
         /// Remove all elements from the slice.
@@ -178,7 +176,7 @@ pub fn BoundedArrayAligned(
         /// This operation is O(N).
         pub fn insertSlice(self: *Self, i: usize, items: []const T) error{Overflow}!void {
             try self.ensureUnusedCapacity(items.len);
-            self.len = @intCast(self.len + items.len);
+            self.len += items.len;
             mem.copyBackwards(T, self.slice()[i + items.len .. self.len], self.constSlice()[i .. self.len - items.len]);
             @memcpy(self.slice()[i..][0..items.len], items);
         }
@@ -208,7 +206,7 @@ pub fn BoundedArrayAligned(
                 for (self.constSlice()[after_range..], 0..) |item, i| {
                     self.slice()[after_subrange..][i] = item;
                 }
-                self.len = @intCast(self.len - len + new_items.len);
+                self.len -= len - new_items.len;
             }
         }
@@ -259,7 +257,7 @@ pub fn BoundedArrayAligned(
         /// enough to store the new items.
         pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
             const old_len = self.len;
-            self.len = @intCast(self.len + items.len);
+            self.len += items.len;
             @memcpy(self.slice()[old_len..][0..items.len], items);
         }
@@ -275,8 +273,8 @@ pub fn BoundedArrayAligned(
         /// Asserts the capacity is enough.
         pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
             const old_len = self.len;
-            assert(self.len + n <= buffer_capacity);
-            self.len = @intCast(self.len + n);
+            self.len += n;
+            assert(self.len <= buffer_capacity);
             @memset(self.slice()[old_len..self.len], value);
         }
@@ -406,18 +404,6 @@ test BoundedArray {
     try testing.expectEqualStrings(s, a.constSlice());
 }
-test "BoundedArray sizeOf" {
-    // Just sanity check size on one CPU
-    if (@import("builtin").cpu.arch != .x86_64)
-        return;
-    try testing.expectEqual(@sizeOf(BoundedArray(u8, 3)), 4);
-    // `len` is the minimum required size to hold the maximum capacity
-    try testing.expectEqual(@TypeOf(@as(BoundedArray(u8, 15), undefined).len), u4);
-    try testing.expectEqual(@TypeOf(@as(BoundedArray(u8, 16), undefined).len), u5);
-}
 test "BoundedArrayAligned" {
     var a = try BoundedArrayAligned(u8, 16, 4).init(0);
     try a.append(0);