const std = @import("std");
const assert = std.debug.assert;
const vk = @import("vulkan");
const dev = @import("device.zig");
const vkd = dev.DeviceDispatch;
const Renderer = @import("Renderer.zig");
const Command = @import("Command.zig");

// TODO memory: module-wide allocator for queue family properties and scratch.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();

// Per-role stage/access masks: the superset of pipeline stages and memory
// accesses that work submitted to a queue of that role may touch.
// NOTE: vk flag types and handles are assumed to follow vulkan-zig
// conventions (packed structs of bools, enum handles with .null_handle);
// adjust the literals below if the binding exposes integer flags instead.
const present_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
};
const present_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
};
const graphics_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .all_graphics_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
    .draw_indirect_bit = true,
    .vertex_input_bit = true,
    .vertex_shader_bit = true,
    .tessellation_control_shader_bit = true,
    .tessellation_evaluation_shader_bit = true,
    .geometry_shader_bit = true,
    .fragment_shader_bit = true,
    .early_fragment_tests_bit = true,
    .late_fragment_tests_bit = true,
    .color_attachment_output_bit = true,
    .conditional_rendering_bit_ext = true,
    .task_shader_bit_nv = true,
    .mesh_shader_bit_nv = true,
};
const graphics_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
    .indirect_command_read_bit = true,
    .index_read_bit = true,
    .vertex_attribute_read_bit = true,
    .uniform_read_bit = true,
    .input_attachment_read_bit = true,
    .shader_read_bit = true,
    .shader_write_bit = true,
    .color_attachment_read_bit = true,
    .color_attachment_write_bit = true,
    .conditional_rendering_read_bit_ext = true,
    .acceleration_structure_read_bit_khr = true,
    .acceleration_structure_write_bit_khr = true,
};
const compute_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
    .compute_shader_bit = true,
    .acceleration_structure_build_bit_khr = true,
    .ray_tracing_shader_bit_khr = true,
};
const compute_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
    .indirect_command_read_bit = true,
    .uniform_read_bit = true,
    .shader_read_bit = true,
    .shader_write_bit = true,
    .acceleration_structure_read_bit_khr = true,
    .acceleration_structure_write_bit_khr = true,
};
const transfer_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
    .transfer_bit = true,
    .host_bit = true,
};
const transfer_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
    .host_read_bit = true,
    .host_write_bit = true,
    .transfer_read_bit = true,
    .transfer_write_bit = true,
};

pub const QueueId = enum(u4) {
    graphics,
    compute,
    transfer,
    present,

    pub const count = @typeInfo(@This()).Enum.fields.len;
};

var queues: [QueueId.count]Queue = undefined;

pub fn init(device: *dev.Device, window: *dev.Window) !void {
    assert(device.phdev != .null_handle);
    assert(window.surface != .null_handle);

    var support = try QueueSupport.init(device, window);
    defer support.deinit();

    for (queues) |*queue, i| {
        try queue.init(device.*, &support, i);
    }
}

pub fn deinit(device: dev.Device) void {
    device.waitIdle();
    for (queues) |*queue| {
        queue.deinit();
    }
}

pub fn get(id: QueueId) *Queue {
    assert(@enumToInt(id) < queues.len);
    return &queues[@enumToInt(id)];
}
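// Usage sketch (illustrative only, never called): roughly how a caller such
// as Renderer.zig might drive this module once device.zig has produced a
// Device and a Window. The parameter names are assumptions; the real call
// sites live outside this file.
fn exampleUsage(device: *dev.Device, window: *dev.Window) !void {
    // Select queue families/indices and build one Queue per QueueId role.
    try init(device, window);
    defer deinit(device.*);

    // Fetch the graphics queue; its stage/access masks describe the superset
    // of work that may be submitted to it.
    const gfx = get(.graphics);
    _ = gfx;
}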
const QueueSupport = struct {
    const Self = @This();

    families: [QueueId.count]?u32,
    indices: [QueueId.count]u32,
    properties: []vk.QueueFamilyProperties,

    pub fn init(device: *const dev.Device, window: *const dev.Window) !Self {
        var count: u32 = 0;
        vkd.getPhysicalDeviceQueueFamilyProperties(device.phdev, &count, null);

        const properties = try allocator.alloc(vk.QueueFamilyProperties, count);
        errdefer allocator.free(properties);
        vkd.getPhysicalDeviceQueueFamilyProperties(device.phdev, &count, properties.ptr);

        var self = Self{
            .families = [_]?u32{null} ** QueueId.count,
            .indices = [_]u32{0} ** QueueId.count,
            .properties = properties,
        };
        self.families[@enumToInt(QueueId.graphics)] = selectGfxFamily(properties);
        self.families[@enumToInt(QueueId.compute)] = selectCompFamily(properties);
        self.families[@enumToInt(QueueId.transfer)] = selectXferFamily(properties);
        self.families[@enumToInt(QueueId.present)] =
            try selectPresFamily(device.phdev, window.surface, properties);

        // 'indices' is the queue index within the chosen family: roles that
        // end up sharing a family get distinct queues from vkGetDeviceQueue.
        // TODO: keep the index below the number of queues actually created
        // for that family at device creation time.
        const choice_counts = try allocator.alloc(u32, count);
        defer allocator.free(choice_counts);
        std.mem.set(u32, choice_counts, 0);

        for (self.families) |family, i| {
            if (family) |f| {
                self.indices[i] = choice_counts[f];
                choice_counts[f] += 1;
            }
        }

        return self;
    }

    pub fn deinit(self: *Self) void {
        allocator.free(self.properties);
    }

    fn selectGfxFamily(families: []const vk.QueueFamilyProperties) ?u32 {
        var index: ?u32 = null;
        var score: u32 = 0;
        for (families) |family, i| {
            if (family.queue_count == 0) {
                continue;
            }
            if (family.queue_flags.graphics_bit) {
                // Prefer the most capable graphics family.
                var new_score: u32 = 0;
                if (family.queue_flags.compute_bit) new_score += 1;
                if (family.queue_flags.transfer_bit) new_score += 1;
                if (index == null or new_score > score) {
                    score = new_score;
                    index = @intCast(u32, i);
                }
            }
        }
        return index;
    }

    fn selectCompFamily(families: []const vk.QueueFamilyProperties) ?u32 {
        var index: ?u32 = null;
        var score: u32 = 0;
        for (families) |family, i| {
            if (family.queue_count == 0) {
                continue;
            }
            if (family.queue_flags.compute_bit) {
                var new_score: u32 = 0;
                if (family.queue_flags.graphics_bit) new_score += 1;
                if (family.queue_flags.transfer_bit) new_score += 1;
                if (index == null or new_score > score) {
                    score = new_score;
                    index = @intCast(u32, i);
                }
            }
        }
        return index;
    }

    fn selectXferFamily(families: []const vk.QueueFamilyProperties) ?u32 {
        var index: ?u32 = null;
        var score: u32 = 0;
        for (families) |family, i| {
            if (family.queue_count == 0) {
                continue;
            }
            if (family.queue_flags.transfer_bit) {
                var new_score: u32 = 0;
                if (family.queue_flags.graphics_bit) new_score += 1;
                if (family.queue_flags.compute_bit) new_score += 1;
                if (index == null or new_score > score) {
                    score = new_score;
                    index = @intCast(u32, i);
                }
            }
        }
        return index;
    }

    fn selectPresFamily(
        phdev: vk.PhysicalDevice,
        surf: vk.SurfaceKHR,
        families: []const vk.QueueFamilyProperties,
    ) !?u32 {
        var index: ?u32 = null;
        var score: u32 = 0;
        for (families) |family, i| {
            const presentable = try vkd.getPhysicalDeviceSurfaceSupportKHR(
                phdev,
                @intCast(u32, i),
                surf,
            );
            if (presentable == vk.TRUE) {
                // Prefer a presentable family that also covers the other roles.
                var new_score: u32 = 0;
                if (family.queue_flags.graphics_bit) new_score += 1;
                if (family.queue_flags.compute_bit) new_score += 1;
                if (family.queue_flags.transfer_bit) new_score += 1;
                if (index == null or new_score > score) {
                    score = new_score;
                    index = @intCast(u32, i);
                }
            }
        }
        return index;
    }
};
const Queue = struct {
    handle: vk.Queue,
    family: u32,
    index: u32,
    access_mask: vk.AccessFlags,
    stage_mask: vk.PipelineStageFlags,
    queueId: u4,
    gfx: bool,
    comp: bool,
    xfer: bool,
    pres: bool,
    cmd_pool: vk.CommandPool,
    cmds: [Renderer.cmds_per_queue]vk.CommandBuffer,
    cmd_fences: [Renderer.cmds_per_queue]vk.Fence,
    cmd_ids: [Renderer.cmds_per_queue]u32,
    head: u32,
    tail: u32,

    const Self = @This();

    pub fn init(self: *Self, device: dev.Device, support: *const QueueSupport, id: usize) !void {
        assert(support.families[id] != null);
        const family = support.families[id].?;
        const index = support.indices[id];
        const handle = vkd.getDeviceQueue(device, family, index);

        self.family = family;
        self.index = index;
        self.handle = handle;
        self.gfx = false;
        self.comp = false;
        self.xfer = false;
        self.pres = false;
        self.stage_mask = .{};
        self.access_mask = .{};

        const pres_family = support.families[@enumToInt(QueueId.present)];
        const queue_flags = support.properties[family].queue_flags;

        if (queue_flags.graphics_bit) {
            self.gfx = true;
            self.stage_mask = self.stage_mask.merge(graphics_stages);
            self.access_mask = self.access_mask.merge(graphics_access);
        }
        if (queue_flags.compute_bit) {
            self.comp = true;
            self.stage_mask = self.stage_mask.merge(compute_stages);
            self.access_mask = self.access_mask.merge(compute_access);
        }
        if (queue_flags.transfer_bit) {
            self.xfer = true;
            self.stage_mask = self.stage_mask.merge(transfer_stages);
            self.access_mask = self.access_mask.merge(transfer_access);
        }
        if (pres_family != null and family == pres_family.?) {
            self.pres = true;
            self.stage_mask = self.stage_mask.merge(present_stages);
            self.access_mask = self.access_mask.merge(present_access);
        }

        assert(self.stage_mask.toInt() != 0);
        assert(self.access_mask.toInt() != 0);

        try Command.init(self, id);
    }

    pub fn deinit(self: *Self) void {
        Command.deinit(self);
    }
};
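// Sanity check (added as a sketch): QueueId is the single source of truth for
// how many per-role queue slots this module tracks.
test "one queue slot per QueueId role" {
    try std.testing.expectEqual(@as(usize, 4), QueueId.count);
    try std.testing.expectEqual(@as(usize, QueueId.count), queues.len);
}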