// efemra/src/rendering/vulkan/Command.zig (forked from vv/efemra)

const assert = @import("std").debug.assert;
const vk = @import("vulkan");
const vkd = @import("device.zig").DeviceDispatch;
const Device = @import("device.zig").Device;
const ProfileMark = @import("../../common/profiler.zig").ProfileMark;
const Renderer = @import("Renderer.zig");
const SubmitId = @import("Swapchain.zig").SubmitId;
const Fence = @import("sync.zig").Fence;
const queues = @import("queues.zig");
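
// Aggregate access masks covering every write-type and every read-type
// VkAccessFlagBits. Presumably used when emitting pipeline barriers to tell
// whether an access mask contains writes (which need a memory dependency)
// or only reads.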
const write_access: vk.AccessFlags = .{
.shader_write_bit = true,
.color_attachment_write_bit = true,
.depth_stencil_attachment_write_bit = true,
.transfer_write_bit = true,
.host_write_bit = true,
.memory_write_bit = true,
.transform_feedback_write_bit_ext = true,
.transform_feedback_counter_write_bit_ext = true,
.acceleration_structure_write_bit_khr = true,
.command_preprocess_write_bit_nv = true,
};
const read_access: vk.AccessFlags = .{
.indirect_command_read_bit = true,
.index_read_bit = true,
.vertex_attribute_read_bit = true,
.uniform_read_bit = true,
.input_attachment_read_bit = true,
.shader_read_bit = true,
.color_attachment_read_bit = true,
.depth_stencil_attachment_read_bit = true,
.transfer_read_bit = true,
.host_read_bit = true,
.memory_read_bit = true,
.transform_feedback_counter_read_bit_ext = true,
.conditional_rendering_read_bit_ext = true,
.color_attachment_read_noncoherent_bit_ext = true,
.acceleration_structure_read_bit_khr = true,
.fragment_density_map_read_bit_ext = true,
.fragment_shading_rate_attachment_read_bit_khr = true,
.command_preprocess_read_bit_nv = true,
};
const pm_init = ProfileMark("Command.init");
pub fn init(device: *Device, queue: *queues.Queue, id: queues.QueueId) !void {
pm_init.start();
defer pm_init.end();
assert(queue.handle);
assert(queue == queues.get(id));
assert(device.dev);
assert(!queue.cmd_pool);
queue.queue_id = id;
queue.cmd_pool = try Pool.init(device, queue.family, .{ .reset_command_buffer_bit = true });
errdefer queue.cmd_pool.deinit(device);
try vkd.allocateCommandBuffers(device.dev, &.{
.s_type = .command_buffer_allocate_info,
.command_pool = queue.cmd_pool.handle,
.level = .primary,
.command_buffer_count = queue.cmds.len,
}, &queue.cmds);
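// One fence and submit id per ring slot; Fence.init(true) presumably creates
// the fence already signaled, so the first wrap-around wait in Buffer.init
// does not block.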
for (queue.cmds) |_, i| {
queue.cmd_fences[i] = try Fence.init(true);
queue.cmd_ids[i] = 0;
}
}
const pm_deinit = ProfileMark("Command.deinit");
pub fn deinit(queue: *queues.Queue, device: *Device) void {
pm_deinit.start();
defer pm_deinit.end();
assert(device.dev);
for (queue.cmd_fences) |fence| {
fence.deinit();
}
queue.cmd_pool.deinit(device);
}
pub fn getHeadSubmit(id: queues.QueueId) SubmitId {
// TODO: not implemented yet.
_ = id;
unreachable;
}
pub const Buffer = packed struct {
const Self = @This();
handle: vk.CommandBuffer,
fence: vk.Fence,
id: u32,
queue_id: queues.QueueId,
// indicates which types of cmds are legal in this cmdbuf
gfx: bool,
comp: bool,
xfer: bool,
pres: bool,
// cmd state
began: bool,
ended: bool,
submit: bool,
is_render_pass: bool,
subpass: u8,
queue_transfer_src: bool,
queue_transfer_dst: bool,
// Acquires the next command buffer slot from the queue's ring: bumps head and,
// if the ring is full, blocks on the oldest (tail) slot's fence before reusing it.
pub fn init(self: *Self, queue: *queues.Queue) !void {
const ring_mask: u32 = queue.cmds.len - 1;
// The SASSERT in the C original checks that queue.cmds.len is a power of two,
// so that masking ids with ring_mask always yields a valid slot index.
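// A minimal runtime equivalent of that compile-time check (a sketch; assumes
// cmds is a fixed-size array):
assert(queue.cmds.len != 0 and (queue.cmds.len & (queue.cmds.len - 1)) == 0);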
const head_id = queue.head;
queue.head += 1;
while (head_id - queue.tail >= queue.cmds.len) {
const tail_slot = queue.tail & ring_mask;
assert(queue.cmd_ids[tail_slot] == queue.tail);
const tail_fence = queue.cmd_fences[tail_slot];
try tail_fence.wait();
queue.cmd_ids[tail_slot] = 0;
queue.tail += 1;
}
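// The ring now has at least one free slot; claim it for this command buffer.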
const head_slot = head_id & ring_mask;
assert(queue.cmd_ids[head_slot] == 0);
queue.cmd_ids[head_slot] = head_id;
const cmd = queue.cmds[head_slot];
const fence = queue.cmd_fences[head_slot];
assert(cmd != .null_handle);
assert(fence.stat() == .signaled);
fence.reset();
self.handle = cmd;
self.fence = fence;
self.id = head_id;
self.queue_id = queue.queue_id;
self.gfx = queue.gfx;
self.comp = queue.comp;
self.xfer = queue.xfer;
self.pres = queue.pres;
}
};
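
// Hypothetical usage sketch (the QueueId variant name below is assumed; the
// queue must already have been set up by init() above):
//
//     var cmd: Buffer = undefined;
//     try cmd.init(queues.get(.graphics));
//     // ... record into cmd.handle, submit, and track cmd.fence for reuse.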
pub const Pool = struct {
const Self = @This();
handle: vk.CommandPool,
pub fn init(device: *Device, family: u32, flags: vk.CommandPoolCreateFlags) !Self {
const handle = try vkd.createCommandPool(device.dev, &.{
.s_type = .command_pool_create_info,
.flags = flags,
.queue_family_index = family,
}, null);
return Self{
.handle = handle,
};
}
pub fn deinit(self: *Self, device: *Device) void {
vkd.destroyCommandPool(device.dev, self.handle, null);
}
pub fn reset(self: *Self, device: *Device, flags: vk.CommandPoolResetFlags) !void {
try vkd.resetCommandPool(device.dev, self.handle, flags);
}
};