forked from vv/efemra

add iterator, remove the double-version bump again as optionals handle that for us
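A minimal usage sketch of the new iterator, pieced together from the test added in this diff (the `table.zig` import path and the `Item` value type are illustrative assumptions, not part of the commit):

    const std = @import("std");
    const Table = @import("table.zig").Table;

    // Stand-in value type, analogous to the file's TestVal test struct.
    const Item = struct { a: u32, b: u32 };

    test "iterate live entries" {
        var table = Table(u32, Item).init(std.testing.allocator);
        defer table.deinit();

        _ = try table.add(19, .{ .a = 9, .b = 7 });
        const r = try table.add(35, .{ .a = 88, .b = 1 });
        _ = try table.remove(r.id); // freed slot holds null and is skipped by the iterator

        var it = table.iterator();
        while (it.next()) |entry| {
            // entry.id is the slot's index plus generation; entry.val_ptr points at the stored value.
            std.debug.print("index={d} gen={d} a={d}\n", .{ entry.id.index, entry.id.gen, entry.val_ptr.a });
        }
    }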

This commit is contained in:
Vivianne 2022-07-26 23:18:06 -07:00
parent 32942c01c8
commit 1e1efad968

@@ -16,11 +16,12 @@ const Queue = @import("queue.zig").Queue;
pub fn Table(comptime K: type, comptime V: type) type {
return struct {
const Self = @This();
+const Size = u24;
/// The ID for the table (index + generation)
pub const Id = packed struct {
gen: u8,
-index: u24,
+index: Size,
};
// Collections sharing an index:
@@ -35,19 +36,19 @@ pub fn Table(comptime K: type, comptime V: type) type {
// Other fields
/// List of indices which have previously been freed and are available to fill.
-free_list: Queue(u24),
+free_list: Queue(Size),
/// Used for mapping the key to its index.
-lookup: std.AutoHashMap(K, u24),
+lookup: std.AutoHashMap(K, Size),
/// The amount of items in the table (not the allocated size)
-len: u24,
+len: Size,
pub fn init(allocator: std.mem.Allocator) Self {
return Self{
.gens = std.ArrayList(u8).init(allocator),
.values = std.ArrayList(?V).init(allocator),
.keys = std.ArrayList(?K).init(allocator),
-.free_list = Queue(u24).init(allocator),
-.lookup = std.AutoHashMap(K, u24).init(allocator),
+.free_list = Queue(Size).init(allocator),
+.lookup = std.AutoHashMap(K, Size).init(allocator),
.len = 0,
};
}
@@ -72,7 +73,11 @@ pub fn Table(comptime K: type, comptime V: type) type {
self.lookup.clearRetainingCapacity();
}
-pub fn size(self: *const Self) usize {
+pub fn capacity(self: *const Self) Size {
+return @intCast(Size, self.values.items.len);
+}
+pub fn size(self: *const Self) Size {
return self.len;
}
@@ -94,8 +99,6 @@ pub fn Table(comptime K: type, comptime V: type) type {
}
if (self.free_list.popOrNull()) |index| {
-// gen is bumped on remove *and* re-add. This way, 'active' slots are even and 'inactive' slots are odd.
-self.gens.items[index] = self.gens.items[index] +% 1;
self.keys.items[index] = key;
self.values.items[index] = val;
try self.lookup.putNoClobber(key, index);
@@ -121,7 +124,7 @@ pub fn Table(comptime K: type, comptime V: type) type {
assert(self.keys.items.len == self.values.items.len);
assert(self.values.items.len == self.gens.items.len);
-const index = @intCast(u24, self.keys.items.len - 1);
+const index = @intCast(Size, self.keys.items.len - 1);
try self.lookup.putNoClobber(key, index);
return AddResult{
@@ -172,6 +175,37 @@ pub fn Table(comptime K: type, comptime V: type) type {
pub fn getKey(self: *Self, id: Id) ?K {
return if (self.exists(id)) (self.keys.items[id.index] orelse unreachable) else null;
}
+pub const Entry = struct {
+id: Id,
+val_ptr: *V,
+};
+pub const Iterator = struct {
+tb: *const Self,
+index: Size = 0,
+pub fn next(it: *Iterator) ?Entry {
+const cap = it.tb.capacity();
+assert(it.index <= cap);
+if (it.tb.size() == 0) return null;
+while (it.index < cap) : (it.index += 1) {
+if (it.tb.values.items[it.index]) |*val| {
+const gen = it.tb.gens.items[it.index];
+const id = Id{ .gen = gen, .index = it.index };
+it.index += 1;
+return Entry{ .id = id, .val_ptr = val };
+}
+}
+return null;
+}
+};
+pub fn iterator(self: *const Self) Iterator {
+return .{ .tb = self };
+}
};
}
@@ -227,15 +261,38 @@ test "table across generation" {
// remove first item, then add a new one, then try to access first item!
_ = try table.remove(table.find(48) orelse unreachable);
try std.testing.expectEqual(@as(u8, 1), table.gens.items[0]);
const second_result = try table.add(99, .{ .a = 2, .b = 3 });
-try std.testing.expectEqual(@as(u8, 2), second_result.id.gen);
+try std.testing.expectEqual(@as(u8, 1), second_result.id.gen);
try std.testing.expect(!table.exists(first_result.id));
try std.testing.expectEqual(@as(?*TestVal, null), table.get(first_result.id));
}
+test "table iteration" {
+var table = Table(u32, TestVal).init(std.testing.allocator);
+defer table.deinit();
+_ = try table.add(19, .{ .a = 9, .b = 7 });
+_ = try table.add(99, .{ .a = 98, .b = 2 });
+const r = try table.add(35, .{ .a = 88, .b = 1 });
+_ = try table.add(26, .{ .a = 80, .b = 8 });
+_ = try table.remove(r.id);
+_ = try table.add(87, .{ .a = 0, .b = 2 });
+var itr = table.iterator();
+var v = itr.next() orelse unreachable;
+try std.testing.expect(v.val_ptr.a == 9);
+v = itr.next() orelse unreachable;
+try std.testing.expect(v.val_ptr.a == 98);
+// newest added item inserted here (results are out of order)
+v = itr.next() orelse unreachable;
+try std.testing.expect(v.val_ptr.a == 0);
+v = itr.next() orelse unreachable;
+try std.testing.expect(v.val_ptr.a == 80);
+try std.testing.expect(itr.next() == null);
+}
/// Like Table, but values are refcounted. When duplicate items are added or retained, their
/// reference count is incremented. When items are released, their reference count is
/// decremented, and zero-count items are removed and returned. The ownership of the item then
@@ -356,6 +413,13 @@ pub fn RefTable(comptime K: type, comptime V: type) type {
pub fn getKey(self: *Self, id: Id) ?K {
return self.table.getKey(id);
}
+pub const Entry = InternalTable.Entry;
+pub const Iterator = InternalTable.Iterator;
+pub fn iterator(self: *Self) Iterator {
+return self.table.iterator();
+}
};
}
@@ -413,7 +477,7 @@ test "ref counting" {
first_result = try table.add(12, .{ .a = 6, .b = 5 });
try std.testing.expect(first_result.added);
try std.testing.expectEqual(@as(u24, 0), first_result.id.index);
-try std.testing.expectEqual(@as(u8, 2), first_result.id.gen);
+try std.testing.expectEqual(@as(u8, 1), first_result.id.gen);
const second_result = try table.add(12, .{ .a = 1, .b = 2 });
try std.testing.expect(!second_result.added);