From 0930ffc47f8453cee0e27d6347a7fff6047e8285 Mon Sep 17 00:00:00 2001
From: Vivianne Langdon
Date: Tue, 26 Jul 2022 02:28:30 -0700
Subject: [PATCH] add Queue, Table and RefTable containers!

---
 notes.txt                |  29 ++-
 src/containers/queue.zig | 134 +++++++++++++
 src/containers/table.zig | 419 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 576 insertions(+), 6 deletions(-)
 create mode 100644 src/containers/queue.zig
 create mode 100644 src/containers/table.zig

diff --git a/notes.txt b/notes.txt
index d4f8058..2d43625 100644
--- a/notes.txt
+++ b/notes.txt
@@ -1,7 +1,24 @@
-- mach-glfw for glfw bindings
-- vulkan-zig for vulkan - probably simplifies this part?
-  - Can always reimplement if desired.
-
-- SpexGuy/Zig-VMA - vulkan memory allocator lib for zig.
-  - Outdated build.zig .. .gah.
-  - time to fork...
+generational structure of arrays, AKA slotmaps
+- Important bit: the components of each type are all in contiguous memory
+- Core reference-counted mapping, followed by dependent mappings for each
+  component type
+
+  - In pim, the current component types are:
+    - Mesh
+    - Texture -or- TexTable
+
+  - These appear to be two separate, similar systems:
+    - The first is containers/table.h, used in the non-vulkan renderer.
+      - This has ref counting and is the first level. If no existing item is
+        found, it calls into vkrTexTable, which then uses IdAlloc.
+
+      - IdAlloc is used for vulkan stuff and also for some
+        quake-specific stuff?
+      - IdAlloc does not have reference counting.
+
+  - IdAlloc:
+    - IdAlloc_Alloc returns a GenId. The data is passed in and is
+      allocated at the correct size.
+
+- Going to work on the assumption that reference counting won't hurt to add.
+  It can be made optional and will probably be fine.
diff --git a/src/containers/queue.zig b/src/containers/queue.zig
new file mode 100644
index 0000000..a3fcb90
--- /dev/null
+++ b/src/containers/queue.zig
@@ -0,0 +1,134 @@
+const std = @import("std");
+
+/// Simple ring-buffer-based queue. Capacity is always a power of two.
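+///
+/// A minimal usage sketch (illustrative only; `allocator` is any
+/// std.mem.Allocator):
+///
+///     var q = Queue(u32).init(allocator);
+///     defer q.deinit();
+///     try q.push(42);
+///     const head = q.pop(); // head == 42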
+pub fn Queue(comptime T: type) type {
+    return struct {
+        const Self = @This();
+        const initial_capacity: usize = 16;
+
+        backing: []T,
+        reads: usize,
+        writes: usize,
+        allocator: std.mem.Allocator,
+
+        pub fn init(allocator: std.mem.Allocator) Self {
+            return Self{
+                .backing = &[0]T{},
+                .reads = 0,
+                .writes = 0,
+                .allocator = allocator,
+            };
+        }
+
+        pub fn deinit(self: *Self) void {
+            self.clearRetainingCapacity();
+            self.allocator.free(self.backing);
+        }
+
+        pub fn clearRetainingCapacity(self: *Self) void {
+            self.reads = 0;
+            self.writes = 0;
+        }
+
+        pub fn size(self: *const Self) usize {
+            return self.writes - self.reads;
+        }
+
+        pub fn capacity(self: *const Self) usize {
+            return self.backing.len;
+        }
+
+        pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
+            const capacity_sanitized = if (new_capacity > initial_capacity)
+                new_capacity
+            else
+                initial_capacity;
+
+            const new_width = std.math.ceilPowerOfTwoAssert(usize, capacity_sanitized);
+            const old_width = self.backing.len;
+            if (new_width > old_width) {
+                const old_slice = self.backing;
+                const new_slice = try self.allocator.alloc(T, new_width);
+                const reads = self.reads;
+                const len = self.writes - self.reads;
+                // Wrapping subtraction: when old_width is 0, len is also 0,
+                // so the copy loop below never dereferences old_slice.
+                const mask = old_width -% 1;
+                var i: usize = 0;
+                while (i < len) : (i += 1) {
+                    const j = (reads + i) & mask;
+                    new_slice[i] = old_slice[j];
+                }
+
+                self.allocator.free(old_slice);
+                self.backing = new_slice;
+                self.reads = 0;
+                self.writes = len;
+            }
+        }
+
+        pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
+            return self.ensureTotalCapacity(self.size() + additional_count);
+        }
+
+        pub fn push(self: *Self, val: T) !void {
+            try self.ensureUnusedCapacity(1);
+            const mask = self.backing.len - 1;
+            const dst = self.writes;
+            self.writes += 1;
+            self.backing[dst & mask] = val;
+        }
+
+        pub fn popOrNull(self: *Self) ?T {
+            if (self.size() == 0) {
+                return null;
+            }
+
+            // Capacity is always a power of two, so `& mask` is equivalent
+            // to `% self.backing.len`.
+            const mask = self.backing.len - 1;
+            const src = self.reads;
+            self.reads += 1;
+
+            return self.backing[src & mask];
+        }
+
+        pub fn pop(self: *Self) T {
+            return self.popOrNull() orelse unreachable;
+        }
+    };
+}
+
+test "general queue usage" {
+    var q = Queue(u32).init(std.testing.allocator);
+    defer q.deinit();
+
+    try std.testing.expectEqual(@as(?u32, null), q.popOrNull());
+
+    try q.push(45);
+    try q.push(42);
+    try q.push(2);
+    try q.push(0);
+
+    try std.testing.expectEqual(@as(u32, 45), q.pop());
+    try std.testing.expectEqual(@as(u32, 42), q.pop());
+
+    try q.push(56);
+
+    var i: u32 = 0;
+    while (i < 16) : (i += 1) {
+        try q.push(i);
+    }
+
+    try std.testing.expectEqual(@as(u32, 2), q.popOrNull() orelse unreachable);
+    try std.testing.expectEqual(@as(u32, 0), q.pop());
+    try std.testing.expectEqual(@as(u32, 56), q.pop());
+
+    i = 0;
+    while (i < 16) : (i += 1) {
+        try std.testing.expectEqual(i, q.pop());
+    }
+
+    try std.testing.expectEqual(@as(usize, 32), q.capacity());
+
+    try std.testing.expectEqual(@as(?u32, null), q.popOrNull());
+    try std.testing.expectEqual(@as(?u32, null), q.popOrNull());
+}
diff --git a/src/containers/table.zig b/src/containers/table.zig
new file mode 100644
index 0000000..1261402
--- /dev/null
+++ b/src/containers/table.zig
@@ -0,0 +1,419 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const assert = std.debug.assert;
+const Queue = @import("queue.zig").Queue;
+
+/// A generational collection. Items can be quickly looked up by key and
+/// quickly removed. Freed indices are stored in a queue and refilled as new
+/// items are added. An Id remains valid for the lifetime of its item; once
+/// the item is removed, the Id will no longer resolve to anything in the
+/// collection.
+///
+/// The table is stored as a Structure of Arrays to improve cache locality.
+///
+/// Note:
+/// Currently the key is hashed automatically (using the logic of
+/// AutoHashMap). In the future, support could be added for a custom hash
+/// strategy.
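+///
+/// A minimal usage sketch (illustrative only; `allocator`, `Player`,
+/// `entity_key`, and `player` are stand-ins, not part of this module):
+///
+///     var table = Table(u32, Player).init(allocator);
+///     defer table.deinit();
+///     const result = try table.add(entity_key, player);
+///     if (table.get(result.id)) |val| val.health -= 10;
+///     _ = try table.remove(result.id);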
+pub fn Table(comptime K: type, comptime V: type) type {
+    return struct {
+        const Self = @This();
+
+        /// The ID for a table entry (index + generation).
+        pub const Id = struct {
+            index: usize,
+            gen: u8,
+        };
+
+        // Collections sharing an index:
+
+        /// Generations of the slots in the table. A slot's generation is
+        /// bumped when its item is removed, invalidating outstanding Ids.
+        gens: std.ArrayList(u8),
+        /// The actual values.
+        values: std.ArrayList(?V),
+        /// Keys for the table entries.
+        keys: std.ArrayList(?K),
+
+        // Other fields
+
+        /// Indices which have previously been freed and are available to fill.
+        free_list: Queue(usize),
+        /// Maps each key to its index.
+        lookup: std.AutoHashMap(K, usize),
+        /// The number of items in the table (not the allocated size).
+        len: usize,
+
+        pub fn init(allocator: std.mem.Allocator) Self {
+            return Self{
+                .gens = std.ArrayList(u8).init(allocator),
+                .values = std.ArrayList(?V).init(allocator),
+                .keys = std.ArrayList(?K).init(allocator),
+                .free_list = Queue(usize).init(allocator),
+                .lookup = std.AutoHashMap(K, usize).init(allocator),
+                .len = 0,
+            };
+        }
+
+        pub fn deinit(self: *Self) void {
+            self.len = 0;
+
+            self.gens.deinit();
+            self.values.deinit();
+            self.keys.deinit();
+            self.free_list.deinit();
+            self.lookup.deinit();
+        }
+
+        pub fn clear(self: *Self) void {
+            self.len = 0;
+
+            self.gens.clearRetainingCapacity();
+            self.values.clearRetainingCapacity();
+            self.keys.clearRetainingCapacity();
+            self.free_list.clearRetainingCapacity();
+            self.lookup.clearRetainingCapacity();
+        }
+
+        pub fn size(self: *const Self) usize {
+            return self.len;
+        }
+
+        pub fn exists(self: *const Self, id: Id) bool {
+            // The slot must be in range, match the Id's generation, and hold
+            // a live value; a freed-but-not-yet-reused slot fails the null
+            // check even though its generation matches.
+            return id.index < self.values.items.len and
+                id.gen == self.gens.items[id.index] and
+                self.values.items[id.index] != null;
+        }
+
+        pub const AddResult = struct {
+            id: Id,
+            added: bool,
+        };
+
+        pub fn add(self: *Self, key: K, val: V) !AddResult {
+            if (self.find(key)) |id| {
+                return AddResult{
+                    .id = id,
+                    .added = false,
+                };
+            }
+
+            if (self.free_list.popOrNull()) |index| {
+                const gen = self.gens.items[index];
+                self.keys.items[index] = key;
+                self.values.items[index] = val;
+                try self.lookup.putNoClobber(key, index);
+                self.len += 1;
+                return AddResult{
+                    .id = .{
+                        .index = index,
+                        .gen = gen,
+                    },
+                    .added = true,
+                };
+            } else {
+                self.len += 1;
+                try self.keys.append(key);
+                try self.values.append(val);
+                try self.gens.append(0);
+
+                assert(self.len == self.keys.items.len);
+                assert(self.keys.items.len == self.values.items.len);
+                assert(self.values.items.len == self.gens.items.len);
+
+                const index = self.keys.items.len - 1;
+                try self.lookup.putNoClobber(key, index);
+
+                return AddResult{
+                    .id = .{
+                        .index = index,
+                        .gen = 0,
+                    },
+                    .added = true,
+                };
+            }
+        }
+
+        pub fn remove(self: *Self, id: Id) !V {
+            assert(self.len > 0);
+            assert(self.exists(id));
+
+            const index = id.index;
+
+            const key = self.keys.items[index] orelse unreachable;
+            const removed = self.lookup.remove(key);
+            assert(removed);
+
+            self.keys.items[index] = null;
+
+            // Bump the generation so outstanding Ids for this slot go stale.
+            self.gens.items[index] += 1;
+            const val = self.values.items[index] orelse unreachable;
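+            // Null the slot so it reads as vacant until the free list
+            // hands it out again.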
+            self.values.items[index] = null;
+            try self.free_list.push(index);
+            self.len -= 1;
+            return val;
+        }
+
+        pub fn get(self: *Self, id: Id) ?*V {
+            if (!self.exists(id)) return null;
+            // Capture the optional's payload by pointer so the caller can
+            // mutate the stored value in place.
+            return if (self.values.items[id.index]) |*val| val else unreachable;
+        }
+
+        pub fn find(self: *Self, key: K) ?Id {
+            if (self.lookup.get(key)) |index| {
+                const gen = self.gens.items[index];
+                return Id{
+                    .index = index,
+                    .gen = gen,
+                };
+            } else {
+                return null;
+            }
+        }
+
+        pub fn getKey(self: *Self, id: Id) ?K {
+            return if (self.exists(id)) (self.keys.items[id.index] orelse unreachable) else null;
+        }
+    };
+}
+
+const TestVal = struct {
+    a: u32,
+    b: u32,
+};
+
+test "general table test" {
+    var table = Table(u32, TestVal).init(std.testing.allocator);
+    defer table.deinit();
+
+    const first_result = try table.add(56, .{ .a = 42, .b = 87 });
+    try std.testing.expect(first_result.added);
+    try std.testing.expectEqual(@as(u8, 0), first_result.id.gen);
+    try std.testing.expectEqual(@as(usize, 0), first_result.id.index);
+
+    const second_result = try table.add(62, .{ .a = 1, .b = 12 });
+    try std.testing.expect(second_result.added);
+    try std.testing.expectEqual(@as(u8, 0), second_result.id.gen);
+    try std.testing.expectEqual(@as(usize, 1), second_result.id.index);
+
+    var second_id = table.find(62) orelse unreachable;
+    var second_val = table.get(second_id) orelse unreachable;
+    try std.testing.expectEqual(@as(u32, 1), second_val.a);
+    try std.testing.expectEqual(@as(u32, 12), second_val.b);
+
+    try std.testing.expectEqual(@as(usize, 2), table.size());
+
+    _ = try table.remove(first_result.id);
+
+    try std.testing.expect(!table.exists(first_result.id));
+    try std.testing.expectEqual(@as(usize, 1), table.size());
+
+    // Ensure the id is not invalidated by the removal of another entry.
+    second_val = table.get(second_id) orelse unreachable;
+    try std.testing.expectEqual(@as(u32, 1), second_val.a);
+    try std.testing.expectEqual(@as(u32, 12), second_val.b);
+
+    // Grab the id again and ensure that works too.
+    second_id = table.find(62) orelse unreachable;
+    second_val = table.get(second_id) orelse unreachable;
+    try std.testing.expectEqual(@as(u32, 1), second_val.a);
+    try std.testing.expectEqual(@as(u32, 12), second_val.b);
+}
+
+test "table across generation" {
+    var table = Table(u32, TestVal).init(std.testing.allocator);
+    defer table.deinit();
+
+    const first_result = try table.add(48, .{ .a = 1, .b = 2 });
+    _ = try table.add(28, .{ .a = 2, .b = 3 });
+
+    // Remove the first item, then add a new one, then try to access the
+    // first item through its stale id.
+    _ = try table.remove(table.find(48) orelse unreachable);
+
+    const second_result = try table.add(99, .{ .a = 2, .b = 3 });
+    try std.testing.expectEqual(@as(u8, 1), second_result.id.gen);
+
+    try std.testing.expect(!table.exists(first_result.id));
+    try std.testing.expectEqual(@as(?*TestVal, null), table.get(first_result.id));
+}
+
+/// Like Table, but values are reference counted. Adding a duplicate item or
+/// retaining an item increments its reference count. Releasing an item
+/// decrements its reference count; when the count reaches zero the item is
+/// removed and returned, and ownership transfers to the caller, who must
+/// perform any memory cleanup that is needed.
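+///
+/// A minimal usage sketch (illustrative only; `allocator`, `Texture`, `key`,
+/// and `tex` are stand-ins):
+///
+///     var textures = RefTable(u32, Texture).init(allocator);
+///     defer textures.deinit();
+///     const r = try textures.add(key, tex); // ref count is now 1
+///     _ = textures.retain(r.id); // ref count is now 2
+///     _ = try textures.release(r.id); // returns null; count drops to 1
+///     if (try textures.release(r.id)) |owned| {
+///         // Count hit zero: the entry was removed and `owned` is now the
+///         // caller's to clean up.
+///         _ = owned;
+///     }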
+pub fn RefTable(comptime K: type, comptime V: type) type {
+    return struct {
+        const Self = @This();
+        const InternalTable = Table(K, V);
+
+        pub const Id = InternalTable.Id;
+
+        /// Underlying table.
+        table: InternalTable,
+        /// Reference counts of the values, sharing the table's indices.
+        ref_counts: std.ArrayList(usize),
+
+        pub fn init(allocator: std.mem.Allocator) Self {
+            return Self{
+                .table = Table(K, V).init(allocator),
+                .ref_counts = std.ArrayList(usize).init(allocator),
+            };
+        }
+
+        /// Expects that all references have been cleaned up; asserts (in safe
+        /// build modes) if dangling references remain. To ignore dangling
+        /// references, call clear() first.
+        pub fn deinit(self: *Self) void {
+            self.table.deinit();
+
+            if (builtin.mode == .Debug or builtin.mode == .ReleaseSafe) {
+                for (self.ref_counts.items) |count| {
+                    assert(count == 0);
+                }
+            }
+
+            self.ref_counts.deinit();
+        }
+
+        pub fn size(self: *const Self) usize {
+            return self.table.size();
+        }
+
+        pub fn remove(self: *Self, id: Id) !V {
+            self.ref_counts.items[id.index] = 0;
+            return self.table.remove(id);
+        }
+
+        /// Increment the reference count of the item. Returns false if the
+        /// id is stale.
+        pub fn retain(self: *Self, id: Id) bool {
+            if (!self.table.exists(id)) {
+                return false;
+            }
+
+            const ref_count = self.ref_counts.items[id.index];
+            self.ref_counts.items[id.index] = ref_count + 1;
+            return true;
+        }
+
+        /// Decrement the reference count of the item. If it reaches zero, the
+        /// item is removed from the table and returned; the caller is then
+        /// responsible for freeing any dynamic memory the value may point to.
+        pub fn release(self: *Self, id: Id) !?V {
+            if (!self.table.exists(id)) {
+                return null;
+            }
+
+            const index = id.index;
+            var ref_count = self.ref_counts.items[index];
+            ref_count -= 1;
+            self.ref_counts.items[index] = ref_count;
+
+            return if (ref_count == 0) try self.remove(id) else null;
+        }
+
+        /// Clear all items from the table. This should only be called if the
+        /// items do not point to dynamically allocated memory!
+        pub fn clear(self: *Self) void {
+            self.table.clear();
+            self.ref_counts.clearRetainingCapacity();
+        }
+
+        pub fn exists(self: *const Self, id: Id) bool {
+            return self.table.exists(id);
+        }
+
+        pub const AddResult = InternalTable.AddResult;
+
+        /// Add to the table. If the key already exists, the existing entry's
+        /// reference count is incremented instead.
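+        /// In that case `added` is false and the returned Id refers to the
+        /// existing entry, so callers should not initialize per-item
+        /// resources a second time.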
+        pub fn add(self: *Self, key: K, val: V) !AddResult {
+            if (self.find(key)) |id| {
+                _ = self.retain(id);
+                return AddResult{
+                    .id = id,
+                    .added = false,
+                };
+            }
+
+            const result = try self.table.add(key, val);
+            assert(result.added);
+            // The underlying table either grew or reused a freed slot; keep
+            // ref_counts the same length either way.
+            if (result.id.index >= self.ref_counts.items.len) {
+                assert(result.id.index == self.ref_counts.items.len);
+                try self.ref_counts.append(1);
+            } else {
+                self.ref_counts.items[result.id.index] = 1;
+            }
+
+            return result;
+        }
+
+        pub fn get(self: *Self, id: Id) ?*V {
+            return self.table.get(id);
+        }
+
+        pub fn find(self: *Self, key: K) ?Id {
+            return self.table.find(key);
+        }
+
+        pub fn getKey(self: *Self, id: Id) ?K {
+            return self.table.getKey(id);
+        }
+    };
+}
+
+test "general ref table test" {
+    var table = RefTable(u32, TestVal).init(std.testing.allocator);
+    defer table.deinit();
+
+    const first_result = try table.add(56, .{ .a = 42, .b = 87 });
+    try std.testing.expect(first_result.added);
+    try std.testing.expectEqual(@as(u8, 0), first_result.id.gen);
+    try std.testing.expectEqual(@as(usize, 0), first_result.id.index);
+
+    const second_result = try table.add(62, .{ .a = 1, .b = 12 });
+    try std.testing.expect(second_result.added);
+    try std.testing.expectEqual(@as(u8, 0), second_result.id.gen);
+    try std.testing.expectEqual(@as(usize, 1), second_result.id.index);
+
+    var second_id = table.find(62) orelse unreachable;
+    var second_val = table.get(second_id) orelse unreachable;
+    try std.testing.expectEqual(@as(u32, 1), second_val.a);
+    try std.testing.expectEqual(@as(u32, 12), second_val.b);
+
+    try std.testing.expectEqual(@as(usize, 2), table.size());
+
+    _ = try table.remove(first_result.id);
+
+    try std.testing.expect(!table.exists(first_result.id));
+    try std.testing.expectEqual(@as(usize, 1), table.size());
+
+    // Ensure the id is not invalidated by the removal of another entry.
+    second_val = table.get(second_id) orelse unreachable;
+    try std.testing.expectEqual(@as(u32, 1), second_val.a);
+    try std.testing.expectEqual(@as(u32, 12), second_val.b);
+
+    // Grab the id again and ensure that works too.
+    second_id = table.find(62) orelse unreachable;
+    second_val = table.get(second_id) orelse unreachable;
+    try std.testing.expectEqual(@as(u32, 1), second_val.a);
+    try std.testing.expectEqual(@as(u32, 12), second_val.b);
+
+    table.clear();
+}
+
+test "ref counting" {
+    var table = RefTable(u32, TestVal).init(std.testing.allocator);
+    defer table.deinit();
+
+    var first_result = try table.add(12, .{ .a = 5, .b = 6 });
+    try std.testing.expectEqual(@as(usize, 1), table.size());
+    var val = (try table.release(first_result.id)) orelse unreachable;
+    try std.testing.expectEqual(@as(usize, 0), table.size());
+    try std.testing.expectEqual(@as(u32, 5), val.a);
+    try std.testing.expectEqual(@as(u32, 6), val.b);
+
+    first_result = try table.add(12, .{ .a = 6, .b = 5 });
+    try std.testing.expect(first_result.added);
+    try std.testing.expectEqual(@as(usize, 0), first_result.id.index);
+    try std.testing.expectEqual(@as(u8, 1), first_result.id.gen);
+    const second_result = try table.add(12, .{ .a = 1, .b = 2 });
+    try std.testing.expect(!second_result.added);
+
+    try std.testing.expectEqual(@as(?TestVal, null), (try table.release(first_result.id)));
+    val = (try table.release(first_result.id)) orelse unreachable;
+    try std.testing.expectEqual(@as(usize, 0), table.size());
+    try std.testing.expectEqual(@as(u32, 6), val.a);
+    try std.testing.expectEqual(@as(u32, 5), val.b);
+}