forked from vv/efemra
1
0
Fork 0

looping with validation errors

This commit is contained in:
Vivianne 2022-07-22 01:30:43 -07:00
parent a8c13de7b6
commit 03162f6f0b
10 changed files with 386 additions and 67 deletions

View File

@ -2,6 +2,7 @@ const std = @import("std");
const assert = std.debug.assert;
const vk = @import("vulkan");
const vma = @import("vma");
const time = @import("../../common/time.zig");
const ProfileMark = @import("../../common/profiler.zig").ProfileMark;
const Device = @import("device.zig").Device;
@ -10,7 +11,7 @@ const settings = @import("settings.zig");
const Command = @import("Command.zig");
const SubmitId = @import("submit_id.zig").SubmitId;
const memory = @import("memory.zig");
const QueueId = @import("queues.zig").QueueId;
const queues = @import("queues.zig");
const Buffer = @This();
@ -27,16 +28,19 @@ pub fn init(size: usize, usage: vk.BufferUsageFlags, mem_usage: vma.MemoryUsage)
pub fn release(self: *Buffer, device: *const Device) !void {
if (self.handle != .null_handle) {
try memory.Releasable.init(&.{
// .frame = get frame count
.submit_id = try self.getSubmit(),
.object = .{
const id = try self.getSubmit();
const frame_count = time.getFrameCount();
_ = try memory.Releasable.init(
device,
frame_count,
id,
.{
.buffer = .{
.handle = self.handle,
.allocation = self.allocation,
},
},
}, device);
);
}
self.* = std.mem.zeroes(Buffer);
}
@ -77,44 +81,44 @@ pub fn unmapRead(self: *const Buffer, device: *const Device) void {
memory.unmap(device, self.allocation);
}
pub fn mapWrite(self: *const Buffer, device: *const Device) !anyopaque {
pub fn mapWrite(self: *Buffer, device: *const Device) ![*]u8 {
if (self.state.stage.toInt() == 0) {
// newly created buffer
self.state.stage = .{ .host_bit = true };
self.state.access = .{ .host_write_bit = true };
}
if (!self.state.stage.host_bit || !self.state.access.host_write_bit) {
if (!self.state.stage.host_bit or !self.state.access.host_write_bit) {
// kara claims this sucks but she's lazy
var cmd = try Command.Buffer.get(.graphics);
var cmd = try Command.Buffer.get(.graphics, device);
_ = try State.hostWrite(device, cmd, self);
const submit_id = try cmd.submit(device, .null_handle, .{}, .null_handle);
const submit_id = try cmd.submit(device, null, .{}, null);
try submit_id.wait(device);
cmd = try Command.Buffer.get(.graphics);
assert(cmd != .null_handle);
cmd = try Command.Buffer.get(.graphics, device);
assert(cmd.handle != .null_handle);
}
assert(self.state.stage.host_bit);
assert(self.state.access.host_write_bit);
return memory.map(device, self.allocation);
return try memory.map(self.allocation);
}
pub fn unmapWrite(self: *const Buffer, device: *const Device) void {
pub fn unmapWrite(self: *const Buffer) void {
assert(self.state.stage.host_bit);
assert(self.state.access.host_write_bit);
memory.unmap(device, self.allocation);
memory.flush(device, self.allocation);
memory.unmap(self.allocation);
memory.flush(self.allocation);
}
const pm_write = ProfileMark.init("Buffer.write");
pub fn write(self: *const Buffer, device: *const Device, src: []const u8) !void {
pub fn write(self: *Buffer, device: *const Device, src: []const u8) !void {
pm_write.begin();
defer pm_write.end();
assert(src.len <= self.size);
const dst = try self.mapWrite(device);
defer self.unmapWrite(device);
@memcpy(dst, src, src.len);
var dst = try self.mapWrite(device);
defer self.unmapWrite();
@memcpy(dst, src.ptr, src.len);
}
const pm_read = ProfileMark.init("Buffer.read");
@ -140,14 +144,265 @@ pub fn getSubmit(self: *const Buffer) !SubmitId {
return id;
}
return SubmitId{};
return error.InvalidBufferHandle;
}
pub const State = struct {
owner: QueueId,
cmd_id: u32,
owner: queues.QueueId,
cmd_id: u32 = 0,
stage: vk.PipelineStageFlags,
access: vk.AccessFlags,
/// Declare host-read usage of `buf` on `cmdbuf`'s queue.
/// Returns true when a pipeline barrier had to be recorded.
pub fn hostRead(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.xfer);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .host_bit = true },
        .access = .{ .host_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare host-write usage of `buf` on `cmdbuf`'s queue.
/// Returns true when a pipeline barrier had to be recorded.
pub fn hostWrite(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.xfer);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .host_bit = true },
        .access = .{ .host_write_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare transfer-source usage of `buf` (read side of a copy).
/// Returns true when a pipeline barrier had to be recorded.
pub fn transferSrc(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.xfer);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .transfer_bit = true },
        .access = .{ .transfer_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare transfer-destination usage of `buf` (write side of a copy).
/// Returns true when a pipeline barrier had to be recorded.
pub fn transferDst(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.xfer);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .transfer_bit = true },
        .access = .{ .transfer_write_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare uniform-buffer usage of `buf`, readable from vertex, fragment
/// and compute shaders. Returns true when a barrier had to be recorded.
/// NOTE(review): the stage mask names all three shader stages even though
/// the queue may be graphics-only or compute-only; `handle` asserts the
/// stages against the queue's stage_mask — confirm this holds for both.
pub fn uniformBuffer(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.gfx or cmdbuf.comp);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .vertex_shader_bit = true, .fragment_shader_bit = true, .compute_shader_bit = true },
        .access = .{ .uniform_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare indirect-draw-argument usage of `buf` on a graphics queue.
/// Returns true when a pipeline barrier had to be recorded.
pub fn indirectDraw(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.gfx);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .draw_indirect_bit = true },
        .access = .{ .indirect_command_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare indirect-dispatch-argument usage of `buf` on a compute queue.
/// Returns true when a pipeline barrier had to be recorded.
pub fn indirectDispatch(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.comp);
    const next = State{
        .owner = cmdbuf.queue_id,
        // NOTE(review): unclear whether the draw-indirect stage is actually
        // required for dispatch-indirect — kept from original; confirm.
        .stage = .{ .draw_indirect_bit = true, .compute_shader_bit = true },
        .access = .{ .indirect_command_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare vertex-attribute usage of `buf` on a graphics queue.
/// Returns true when a pipeline barrier had to be recorded.
pub fn vertexBuffer(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.gfx);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .vertex_input_bit = true },
        .access = .{ .vertex_attribute_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare index-buffer usage of `buf` on a graphics queue.
/// Returns true when a pipeline barrier had to be recorded.
pub fn indexBuffer(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.gfx);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .vertex_input_bit = true },
        .access = .{ .index_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare fragment-shader read usage of `buf`.
/// Returns true when a pipeline barrier had to be recorded.
pub fn fragLoad(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.gfx);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .fragment_shader_bit = true },
        .access = .{ .shader_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare fragment-shader write usage of `buf`.
/// Returns true when a pipeline barrier had to be recorded.
pub fn fragStore(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.gfx);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .fragment_shader_bit = true },
        .access = .{ .shader_write_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare fragment-shader read+write usage of `buf`.
/// Returns true when a pipeline barrier had to be recorded.
pub fn fragLoadStore(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.gfx);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .fragment_shader_bit = true },
        .access = .{ .shader_read_bit = true, .shader_write_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare compute-shader read usage of `buf`.
/// Returns true when a pipeline barrier had to be recorded.
pub fn computeLoad(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.comp);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .compute_shader_bit = true },
        .access = .{ .shader_read_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare compute-shader write usage of `buf`.
/// Returns true when a pipeline barrier had to be recorded.
pub fn computeStore(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.comp);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .compute_shader_bit = true },
        .access = .{ .shader_write_bit = true },
    };
    return next.handle(device, buf);
}
/// Declare compute-shader read+write usage of `buf`.
/// Returns true when a pipeline barrier had to be recorded.
pub fn computeLoadStore(device: *const Device, cmdbuf: *const Command.Buffer, buf: *Buffer) !bool {
    assert(cmdbuf.handle != .null_handle);
    assert(cmdbuf.comp);
    const next = State{
        .owner = cmdbuf.queue_id,
        .stage = .{ .compute_shader_bit = true },
        .access = .{ .shader_read_bit = true, .shader_write_bit = true },
    };
    return next.handle(device, buf);
}
/// Transition `buf` from its previously recorded usage state to `self`.
/// Records pipeline barriers (and, across queues, an ownership-transfer
/// release/acquire pair) only when required. Returns true when a barrier
/// was recorded; false when the new usage was merged into the old state.
fn handle(self: *const State, device: *const Device, buf: *Buffer) !bool {
assert(buf.handle != .null_handle);
assert(self.access.toInt() != 0);
assert(self.stage.toInt() != 0);
var inserted_barrier = false;
const prev = &buf.state;
const prev_queue = queues.get(prev.owner);
const next_queue = queues.get(self.owner);
// The requested stages/accesses must be a subset of what the target queue supports.
assert(self.stage.intersect(next_queue.stage_mask).toInt() == self.stage.toInt());
assert(self.access.intersect(next_queue.access_mask).toInt() == self.access.toInt());
// An empty previous stage mask means the buffer was never used before;
// seed it with top-of-pipe and force a barrier below.
var new_resource = false;
if (prev.stage.toInt() == 0) {
prev.stage = .{ .top_of_pipe_bit = true };
new_resource = true;
}
if (prev_queue != next_queue) {
// queue ownership transfer
// The identical barrier must be recorded twice: a release on the
// source queue and an acquire on the destination queue.
var src_cmd = try Command.Buffer.get(prev.owner, device);
var dst_cmd = try Command.Buffer.get(self.owner, device);
// If either command buffer is already part of an opposite-direction
// transfer, flush pending transfers before recording a new pair.
if (src_cmd.queue_transfer_dst or dst_cmd.queue_transfer_src) {
try Command.Buffer.flushQueueTransfers(device);
}
const barrier = vk.BufferMemoryBarrier{
.src_access_mask = prev.access,
.dst_access_mask = self.access,
.src_queue_family_index = prev_queue.family,
.dst_queue_family_index = next_queue.family,
.buffer = buf.handle,
.offset = 0,
// Whole-buffer barrier; per-range tracking is not attempted.
.size = vk.WHOLE_SIZE,
};
assert(!src_cmd.queue_transfer_dst);
src_cmd.queue_transfer_src = true;
device.dispatch.cmdPipelineBarrier(src_cmd.handle, prev.stage, self.stage, .{}, 0, undefined, 1, @ptrCast([*]const vk.BufferMemoryBarrier, &barrier), 0, undefined);
assert(!dst_cmd.queue_transfer_src);
dst_cmd.queue_transfer_dst = true;
device.dispatch.cmdPipelineBarrier(dst_cmd.handle, prev.stage, self.stage, .{}, 0, undefined, 1, @ptrCast([*]const vk.BufferMemoryBarrier, &barrier), 0, undefined);
prev.* = self.*;
inserted_barrier = true;
} else {
// Same queue: a barrier is only needed for an actual data hazard.
var need_barrier = false;
// Host-to-host transitions never get a barrier here — presumably
// covered by submission-level host synchronization; TODO confirm.
const host_only = prev.stage.host_bit and self.stage.host_bit;
if (!host_only) {
const src_read = prev.access.intersect(Command.read_access).toInt() != 0;
const src_write = prev.access.intersect(Command.write_access).toInt() != 0;
const dst_read = self.access.intersect(Command.read_access).toInt() != 0;
const dst_write = self.access.intersect(Command.write_access).toInt() != 0;
// Read-after-read needs no barrier; everything else does.
const read_after_write = src_write and dst_read;
const write_after_read = src_read and dst_write;
const write_after_write = src_write and dst_write;
need_barrier = read_after_write or write_after_read or write_after_write or new_resource;
}
if (need_barrier) {
// data hazard, insert barrier
const cmd = try Command.Buffer.get(self.owner, device);
assert(!cmd.in_render_pass);
const barrier = vk.BufferMemoryBarrier{
.src_access_mask = prev.access,
.dst_access_mask = self.access,
// Same queue family: no ownership transfer.
.src_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
.dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
.buffer = buf.handle,
.offset = 0,
.size = vk.WHOLE_SIZE,
};
device.dispatch.cmdPipelineBarrier(cmd.handle, prev.stage, self.stage, .{}, 0, undefined, 1, @ptrCast([*]const vk.BufferMemoryBarrier, &barrier), 0, undefined);
prev.* = self.*;
inserted_barrier = true;
} else {
// no data hazard, append usage state
prev.stage = prev.stage.merge(self.stage);
prev.access = prev.access.merge(self.access);
}
}
// Mark the buffer as touched by this command buffer (sets state.cmd_id
// via touchBuffer) so submit/release tracking sees the dependency.
(try Command.Buffer.get(self.owner, device)).touchBuffer(buf);
return inserted_barrier;
}
};
pub const Set = struct {
@ -173,7 +428,7 @@ pub const Set = struct {
return try self.current(swapchain).reserve(device, size, usage, mem_usage);
}
pub fn write(self: *const Set, device: *const Device, swapchain: *const Swapchain, src: []const u8) !void {
pub fn write(self: *Set, device: *const Device, swapchain: *const Swapchain, src: []const u8) !void {
try self.current(swapchain).write(device, src);
}
@ -181,7 +436,7 @@ pub const Set = struct {
try self.current(swapchain).read(device, dst);
}
pub fn current(self: *const Set, swapchain: *const Swapchain) *const Buffer {
pub fn current(self: *Set, swapchain: *const Swapchain) *Buffer {
const sync_index = swapchain.getSyncIndex();
assert(sync_index < self.frames.len);
return &self.frames[sync_index];

View File

@ -15,6 +15,7 @@ const SubmitId = @import("submit_id.zig").SubmitId;
const Fence = @import("sync.zig").Fence;
const Semaphore = @import("sync.zig").Semaphore;
const Bindings = @import("Bindings.zig");
const BufferBuffer = @import("Buffer.zig");
const queues = @import("queues.zig");
@ -304,7 +305,7 @@ pub const Buffer = struct {
device.dispatch.cmdDraw(self.handle, vertex_count, 1, first_vertex, 0);
}
pub fn touchBuffer(self: *Self, buf: *const Buffer) void {
pub fn touchBuffer(self: *Self, buf: *BufferBuffer) void {
assert(self.handle != .null_handle);
assert(buf.handle != .null_handle);
buf.state.cmd_id = self.id;
@ -378,7 +379,7 @@ pub const Buffer = struct {
return submit_id;
}
const FlushQueueTransfersError = DeviceDispatch.EndCommandBufferError || DeviceDispatch.QueueSubmitError;
pub const FlushQueueTransfersError = DeviceDispatch.EndCommandBufferError || DeviceDispatch.QueueSubmitError;
const pm_buffer_flushqueuetransfers = ProfileMark.init("Command.Buffer.flushqueuetransfers");
pub fn flushQueueTransfers(device: *const Device) FlushQueueTransfersError!void {
pm_buffer_flushqueuetransfers.begin();

View File

@ -48,12 +48,12 @@ pub fn deinit(device: *const Device) void {
}
const pm_setup = ProfileMark.init("MainPass.setup");
pub fn setup(device: *const Device, swapchain: *const Swapchain) void {
pub fn setup(device: *const Device, swapchain: *const Swapchain) !void {
pm_setup.begin();
defer pm_setup.end();
DepthPass.setup();
OpaquePass.setup(device, swapchain);
try OpaquePass.setup(device, swapchain);
// Exposure.setup();
// UIPass.setup();
}
@ -141,7 +141,6 @@ const DepthPass = struct {
.depth_clamp = false,
.depth_test_enable = true,
.depth_write_enable = true,
.attachment_count = 0,
},
};
@ -309,7 +308,7 @@ const OpaquePass = struct {
.{
.binding = 3,
.location = 3,
.format = .r32g32b32a32_sfloat,
.format = .r32g32b32a32_uint,
.offset = 0,
},
};
@ -335,18 +334,17 @@ const OpaquePass = struct {
.depth_clamp = false,
.depth_test_enable = true,
.depth_write_enable = false,
.attachment_count = 1,
.attachments = undefined,
.attachments = &[_]Pass.BlendState{
.{
.color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true },
.blend_enable = true,
.color_blend_op = .add,
.alpha_blend_op = .add,
},
},
},
};
desc.fixed_funcs.attachments.?[0] = .{
.color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true },
.blend_enable = true,
.color_blend_op = .add,
.alpha_blend_op = .add,
};
try s_pass.init(device, &desc);
}
@ -356,17 +354,17 @@ const OpaquePass = struct {
}
const pm_opaque_setup = ProfileMark.init("OpaquePass.setup");
pub fn setup(device: *const Device, swapchain: *const Swapchain) void {
pub fn setup(device: *const Device, swapchain: *const Swapchain) !void {
pm_opaque_setup.begin();
defer pm_opaque_setup.end();
// TODO: move this into some parameter provider standalone file, says kara
{
const camera = Camera.get();
const globals = .{
const globals = Globals{
.g_WorldToClip = camera.getWorldToClip(Targets.getRenderAspect()),
.g_Eye = camera.position,
.g_HdrEnabled = false, // TODO getHdrEnabled
.g_HdrEnabled = 0, // TODO getHdrEnabled
.g_Whitepoint = 0, // TODO getWhitepoint
.g_DisplayNits = 0, // TODO getDisplayNitsMax
.g_UiNits = 0, // TODO getUiNits
@ -380,9 +378,7 @@ const OpaquePass = struct {
},
};
_ = device;
_ = globals;
//try s_per_camera_buffer.write(device, swapchain, &@ptrCast([@sizeOf(Globals)]u8, &globals));
try s_per_camera_buffer.write(device, swapchain, @ptrCast([*]const u8, &globals)[0..@sizeOf(Globals)]);
}
Bindings.bindBuffer(settings.Bid.Globals.id, .uniform_buffer, s_per_camera_buffer.current(swapchain));

View File

@ -42,8 +42,7 @@ pub const FixedFuncs = struct {
depth_clamp: bool,
depth_test_enable: bool,
depth_write_enable: bool,
attachment_count: u3,
attachments: ?[8]BlendState = null,
attachments: ?[]const BlendState = null,
};
pub const Description = struct {

View File

@ -75,7 +75,7 @@ pub fn update(self: *Self) !void {
{
_ = try self.swapchain.acquireSync(&self.device);
_ = try self.swapchain.acquireImage(&self.device);
memory.update();
try memory.update(&self.device);
}
// system update
@ -85,7 +85,7 @@ pub fn update(self: *Self) !void {
// setup phase
{
MainPass.setup(&self.device, &self.swapchain);
try MainPass.setup(&self.device, &self.swapchain);
// TODO textable update
try Command.flush(&self.device);
Bindings.update(&self.device, &self.swapchain);

View File

@ -277,7 +277,7 @@ pub const Image = struct {
try Command.Buffer.flushQueueTransfers(device);
}
const barrier: vk.ImageMemoryBarrier = .{
const barrier = vk.ImageMemoryBarrier{
.src_access_mask = prev.access,
.dst_access_mask = self.access,
.old_layout = prev.layout,
@ -304,17 +304,17 @@ pub const Image = struct {
inserted_barrier = true;
} else {
const layout_change = prev.layout != self.layout;
const src_read = prev.access.contains(Command.read_access);
const src_write = prev.access.contains(Command.write_access);
const dst_read = self.access.contains(Command.read_access);
const dst_write = self.access.contains(Command.write_access);
const src_read = prev.access.intersect(Command.read_access).toInt() != 0;
const src_write = prev.access.intersect(Command.write_access).toInt() != 0;
const dst_read = self.access.intersect(Command.read_access).toInt() != 0;
const dst_write = self.access.intersect(Command.write_access).toInt() != 0;
const read_after_write = src_write and dst_read;
const write_after_read = src_read and dst_write;
const write_after_write = src_write and dst_write;
if (layout_change or read_after_write or write_after_read or write_after_write or new_resource) {
// data hazard, insert barrier
const cmd = try Command.Buffer.get(self.owner, device);
const barrier: vk.ImageMemoryBarrier = .{
const barrier = vk.ImageMemoryBarrier{
.src_access_mask = prev.access,
.dst_access_mask = self.access,
.old_layout = prev.layout,

View File

@ -3,6 +3,7 @@ const vk = @import("vulkan");
const assert = std.debug.assert;
const ProfileMark = @import("../../common/profiler.zig").ProfileMark;
const time = @import("../../common/time.zig");
const framebuffer = @import("framebuffer.zig");
const Base = @import("Base.zig");
@ -44,7 +45,59 @@ pub fn finalize(device: *const Device) !void {
s_allocator.releasables.clearRetainingCapacity();
}
pub fn update() void {}
const pm_map = ProfileMark.init("memory.map");
/// Map `allocation` into host address space and return a byte pointer.
/// Caller must pair this with `unmap`. Asserts the allocation is live.
pub fn map(allocation: vma.Allocation) ![*]u8 {
    pm_map.begin();
    defer pm_map.end();
    assert(allocation != .Null);
    const bytes = try s_allocator.handle.mapMemory(allocation, u8);
    return bytes;
}
const pm_unmap = ProfileMark.init("memory.unmap");
/// Unmap a host mapping previously created by `map`.
/// Asserts the allocation is live.
pub fn unmap(allocation: vma.Allocation) void {
pm_unmap.begin();
defer pm_unmap.end();
assert(allocation != .Null);
s_allocator.handle.unmapMemory(allocation);
}
const pm_flush = ProfileMark.init("memory.flush");
/// Flush then invalidate the entire allocation, making host writes visible
/// to the device and device writes visible to the host (required for
/// non-coherent memory types; harmless no-op for coherent ones).
pub fn flush(allocation: vma.Allocation) void {
    pm_flush.begin();
    defer pm_flush.end();
    // Guard against a null allocation, consistent with map()/unmap().
    assert(allocation != .Null);
    s_allocator.handle.flushAllocation(allocation, 0, vk.WHOLE_SIZE);
    s_allocator.handle.invalidateAllocation(allocation, 0, vk.WHOLE_SIZE);
}
const UpdateError = SubmitId.PollError || FinalizeCheckError;
const pm_update = ProfileMark.init("memory.update");
/// Per-frame maintenance: advances VMA's frame index and reaps any queued
/// releasables whose GPU submissions have completed, then enforces the
/// releasable cap via finalizeCheck.
pub fn update(device: *const Device) UpdateError!void {
    pm_update.begin();
    defer pm_update.end();

    if (s_allocator.handle != .Null) {
        const frame = time.getFrameCount();
        // Frame counter truncated to u32 for VMA. XXX hmm
        s_allocator.handle.setCurrentFrameIndex(@intCast(u32, frame));
        {
            s_allocator.mutex.lock();
            defer s_allocator.mutex.unlock();
            // Walk backwards so swapRemove never skips an unvisited item.
            // BUG FIX: the previous `var i = len - 1; while (i >= 0) : (i -= 1)`
            // loop underflowed usize at i == 0 (`i >= 0` is always true for an
            // unsigned index), panicking in safe builds. Decrement before use.
            var i = s_allocator.releasables.items.len;
            while (i > 0) {
                i -= 1;
                if (try s_allocator.releasables.items[i].submit_id.poll(device)) {
                    _ = s_allocator.releasables.swapRemove(i);
                }
            }
        }
        try finalizeCheck(device, s_allocator.releasables.items.len);
    }
}
const pm_imgnew = ProfileMark.init("memory.imageNew");
pub fn imageNew(img: *image.Image, device: *const Device, info: vk.ImageCreateInfo, mem_usage: vma.MemoryUsage) !void {
@ -159,7 +212,7 @@ fn getBufferPool(usage: vk.BufferUsageFlags, mem_usage: vma.MemoryUsage) !vma.Po
assert(usage.intersect(pool_usage).toInt() != 0);
return s_allocator.staging_pool.handle;
},
.gpuToCpu => {
.gpuToCpu, .cpuToGpu => { // just a guess
const pool_usage = s_allocator.dynamic_buffer_pool.buffer_usage orelse unreachable;
assert(usage.intersect(pool_usage).toInt() != 0);
return s_allocator.dynamic_buffer_pool.handle;
@ -312,24 +365,32 @@ pub const Releasable = struct {
attachment: vk.ImageView,
};
frame_count: u64,
submit_id: SubmitId,
object: Union,
const pm_releasableinit = ProfileMark.init("memory.Releasable.init");
pub fn init(self: *const Self, device: *const Device) !void {
pub fn init(device: *const Device, frame_count: u64, id: SubmitId, object: Union) !Self {
pm_releasableinit.begin();
defer pm_releasableinit.end();
assert(id.valid);
assert(s_allocator.handle != .Null);
assert(self.submit_id.valid);
const self = Self{
.submit_id = id,
.object = object,
.frame_count = frame_count,
};
{
s_allocator.mutex.lock();
defer s_allocator.mutex.unlock();
try s_allocator.releasables.append(self.*);
try s_allocator.releasables.append(self);
}
try finalizeCheck(device, s_allocator.releasables.items.len);
return self;
}
const pm_releasabledeinit = ProfileMark.init("memory.Releasable.deinit");
@ -451,11 +512,12 @@ const Allocator = struct {
}
};
fn finalizeCheck(device: *const Device, size: usize) !void {
pub const FinalizeCheckError = Command.Buffer.FlushQueueTransfersError || SubmitId.WaitAllError || error{FinalizeUpdateFail};
/// When too many releasables have accumulated, force a full flush + wait
/// and reap completed submissions so GPU objects get freed.
fn finalizeCheck(device: *const Device, size: usize) FinalizeCheckError!void {
    if (size >= 1024) { // TODO convar
        std.debug.print("Too many gpu objects, force-finalizing\n", .{});
        try Command.flush(device);
        try SubmitId.waitAll(device);
        // update()'s error set depends on this function's, so it cannot be
        // propagated directly; log the real cause instead of discarding it.
        update(device) catch |err| {
            std.debug.print("finalizeCheck: update failed: {s}\n", .{@errorName(err)});
            return error.FinalizeUpdateFail;
        };
    }
}

View File

@ -154,7 +154,7 @@ pub fn newGfx(device: *const Device, fixed_funcs: *const FixedFuncs, vert_layout
.logic_op_enable = 0,
.logic_op = .clear,
.blend_constants = std.mem.zeroes([4]f32),
.attachment_count = fixed_funcs.attachments.?.len,
.attachment_count = if (fixed_funcs.attachments) |att| @intCast(u32, att.len) else 0,
.p_attachments = &color_blend_attachments,
};

View File

@ -1,17 +1,21 @@
const std = @import("std");
const assert = std.debug.assert;
const vk = @import("vulkan");
const Device = @import("device.zig").Device;
const DeviceDispatch = @import("device.zig").DeviceDispatch;
const queues = @import("queues.zig");
const profiler = @import("../../common/profiler.zig");
const Fence = @import("sync.zig").Fence;
pub const SubmitId = packed struct {
counter: u32 = 0,
queue_id: queues.QueueId = .graphics,
valid: bool = false,
pub const PollError = Fence.StatError || DeviceDispatch.ResetCommandBufferError;
const pm_poll = profiler.ProfileMark.init("SubmitId.poll");
pub fn poll(self: *const SubmitId, device: *const Device) !bool {
pub fn poll(self: *const SubmitId, device: *const Device) PollError!bool {
pm_poll.begin();
defer pm_poll.end();
@ -25,7 +29,7 @@ pub const SubmitId = packed struct {
return false;
}
device.dispatch.resetCommandBuffer(queue.cmds[i], .{});
try device.dispatch.resetCommandBuffer(queue.cmds[i], .{});
queue.cmd_ids[i] = 0;
}
@ -52,6 +56,7 @@ pub const SubmitId = packed struct {
}
}
pub const WaitAllError = Fence.WaitError || DeviceDispatch.ResetCommandBufferError;
const pm_waitall = profiler.ProfileMark.init("SubmitId.waitAll");
pub fn waitAll(device: *const Device) !void {
pm_waitall.begin();

View File

@ -65,7 +65,8 @@ pub const Fence = struct {
}
}
pub fn stat(self: *const Self, device: *const Device) DeviceDispatch.GetFenceStatusError!State {
pub const StatError = DeviceDispatch.GetFenceStatusError;
pub fn stat(self: *const Self, device: *const Device) StatError!State {
return @intToEnum(State, @enumToInt(try device.dispatch.getFenceStatus(device.handle, self.handle)));
}
};