so many compiler errors, oof

This commit is contained in:
Vivianne 2022-07-11 01:17:47 -07:00
parent b9b7f21ef0
commit 69828a3e69
24 changed files with 1327 additions and 907 deletions

3
.ignore Normal file
View File

@ -0,0 +1,3 @@
.git/
zig-out/
!zig-cache/vk.zig

View File

@ -98,7 +98,7 @@ pub fn build(b: *std.build.Builder) void {
res.addShader("triangle_frag", "src/shaders/triangle.frag");
exe.addPackage(res.package);
build_vma.linkVma(exe, gen.output_file.getPath(), mode, target);
build_vma.link(exe, gen.output_file.getPath(), mode, target);
build_imgui.link(exe);
exe.install();

View File

@ -1,8 +1,8 @@
git https://github.com/Snektron/vulkan-zig.git fdf43d846a4c3ff4d977bd4395898e4e16ca62cf generator/main.zig fdf43d846a4c3ff4d977bd4395898e4e16ca62cf
git https://github.com/hexops/mach-glfw.git 99bf7df61b9afe7c80aae29d664a566f42a8db35 src/main.zig 99bf7df61b9afe7c80aae29d664a566f42a8db35
git https://github.com/viviicat/Zig-VMA.git zig-0.10 vma.zig 4eb07330e875fb5109f855ae901f3e21df95c897
git https://github.com/Snektron/vulkan-zig.git fdf43d846a4c3ff4d977bd4395898e4e16ca62cf generator/index.zig fdf43d846a4c3ff4d977bd4395898e4e16ca62cf
git https://github.com/hexops/mach-glfw.git 99bf7df61b9afe7c80aae29d664a566f42a8db35 build.zig 99bf7df61b9afe7c80aae29d664a566f42a8db35
git https://github.com/viviicat/Zig-VMA.git zig-0.10 build.zig 4eb07330e875fb5109f855ae901f3e21df95c897
git https://github.com/SpexGuy/Zig-ImGui.git 0a2cfca89de2ef1ff5a346c6e2c29e8b3347d2e3 zig-imgui/imgui.zig 0a2cfca89de2ef1ff5a346c6e2c29e8b3347d2e3
git https://github.com/SpexGuy/Zig-ImGui.git 0a2cfca89de2ef1ff5a346c6e2c29e8b3347d2e3 zig-imgui/imgui_build.zig 0a2cfca89de2ef1ff5a346c6e2c29e8b3347d2e3
git https://github.com/SpexGuy/Zig-VMA.git 1f74653e44ecfe98da66059f2bb13033dfabc0df vma.zig 1f74653e44ecfe98da66059f2bb13033dfabc0df
git https://github.com/SpexGuy/Zig-VMA.git 1f74653e44ecfe98da66059f2bb13033dfabc0df vma_build.zig 1f74653e44ecfe98da66059f2bb13033dfabc0df

View File

@ -9,16 +9,16 @@ deps:
url: "https://github.com/hexops/mach-glfw.git"
ref: 99bf7df61b9afe7c80aae29d664a566f42a8db35
root: src/main.zig
vma:
git:
url: "https://github.com/viviicat/Zig-VMA.git"
ref: zig-0.10
root: vma.zig
imgui:
git:
url: "https://github.com/SpexGuy/Zig-ImGui.git"
ref: 0a2cfca89de2ef1ff5a346c6e2c29e8b3347d2e3
root: zig-imgui/imgui.zig
vma:
git:
url: "https://github.com/SpexGuy/Zig-VMA.git"
ref: 1f74653e44ecfe98da66059f2bb13033dfabc0df
root: vma.zig
build_deps:
build_vulkan:
git:
@ -30,13 +30,13 @@ build_deps:
url: "https://github.com/hexops/mach-glfw.git"
ref: 99bf7df61b9afe7c80aae29d664a566f42a8db35
root: build.zig
build_vma:
git:
url: "https://github.com/viviicat/Zig-VMA.git"
ref: zig-0.10
root: build.zig
build_imgui:
git:
url: "https://github.com/SpexGuy/Zig-ImGui.git"
ref: 0a2cfca89de2ef1ff5a346c6e2c29e8b3347d2e3
root: zig-imgui/imgui_build.zig
build_vma:
git:
url: "https://github.com/SpexGuy/Zig-VMA.git"
ref: 1f74653e44ecfe98da66059f2bb13033dfabc0df
root: vma_build.zig

View File

@ -32,7 +32,7 @@ pub fn main() !void {
while (!window.shouldClose()) {
std.debug.print("hi", .{});
try renderer.drawFrame();
renderer.update();
try glfw.pollEvents();
}
}

View File

@ -0,0 +1,15 @@
//! Pre-instance ("base") Vulkan dispatch: the handful of entry points that can
//! be loaded before a VkInstance exists (instance creation plus enumeration of
//! layers and extensions).
const vk = @import("vulkan");
const glfw = @import("glfw");
/// Dispatch wrapper over the base-level Vulkan commands this renderer uses.
pub const BaseDispatch = vk.BaseWrapper(.{
    .createInstance = true,
    .enumerateInstanceLayerProperties = true,
    .enumerateInstanceExtensionProperties = true,
});
/// GLFW's Vulkan loader entry point, cast to the function type vulkan-zig
/// expects for loading the base dispatch table.
/// NOTE(review): two-argument @ptrCast is pre-0.11 builtin syntax — matches the
/// Zig version this project targets; confirm when upgrading the toolchain.
pub const vk_proc = @ptrCast(fn (instance: vk.Instance, procname: [*:0]const u8) callconv(.C) vk.PfnVoidFunction, glfw.getInstanceProcAddress);
/// Module-global base dispatch table; only valid after `init` succeeds.
pub var dispatch: BaseDispatch = undefined;
/// Loads the base dispatch table through GLFW's `getInstanceProcAddress`.
/// Must run before any other Vulkan call in this renderer.
pub fn init() !void {
    dispatch = try BaseDispatch.load(vk_proc);
}

View File

@ -1,86 +1,83 @@
const assert = @import("std").debug.assert;
const vk = @import("vulkan");
const vkd = @import("device.zig").DeviceDispatch;
const settings = @import("settings.zig");
const Device = @import("device.zig").Device;
const ProfileMark = @import("/common/profiler.zig").ProfileMark;
const ProfileMark = @import("../../common/profiler.zig").ProfileMark;
const Renderer = @import("Renderer.zig");
const SubmitId = @import("Swapchain.zig").SubmitId;
const SubmitId = @import("swapchain.zig").SubmitId;
const Fence = @import("sync.zig").Fence;
const queues = @import("queues.zig");
const write_access: vk.AccessFlags =
.shader_write_bit |
.color_attachment_write_bit |
.depth_stencil_attachment_write_bit |
.transfer_write_bit |
.host_write_bit |
.memory_write_bit |
.transform_feedback_write_bit_ext |
.transform_feedback_counter_write_bit_ext |
.acceleration_structure_write_bit_khr |
.command_preprocess_write_bit_nv;
const read_access: vk.AccessFlags =
.indirect_command_read_bit |
.index_read_bit |
.vertex_attribute_read_bit |
.uniform_read_bit |
.input_attachment_read_bit |
.shader_read_bit |
.color_attachment_read_bit |
.shader_read_bit |
.depth_stencil_attachment_read_bit |
.transfer_read_bit |
.host_read_bit |
.memory_read_bit |
.transform_feedback_counter_read_bit_ext |
.conditional_rendering_read_bit_ext |
.color_attachment_read_noncoherent_bit_ext |
.acceleration_structure_read_bit_khr |
.fragment_density_map_read_bit_ext |
.fragment_shading_rate_attachment_read_bit_khr |
.command_preprocess_read_bit_nv;
const write_access: vk.AccessFlags = .{
.shader_write_bit = true,
.color_attachment_write_bit = true,
.depth_stencil_attachment_write_bit = true,
.transfer_write_bit = true,
.host_write_bit = true,
.memory_write_bit = true,
.transform_feedback_write_bit_ext = true,
.transform_feedback_counter_write_bit_ext = true,
.acceleration_structure_write_bit_khr = true,
.command_preprocess_write_bit_nv = true,
};
const read_access: vk.AccessFlags = .{
.indirect_command_read_bit = true,
.index_read_bit = true,
.vertex_attribute_read_bit = true,
.uniform_read_bit = true,
.input_attachment_read_bit = true,
.shader_read_bit = true,
.color_attachment_read_bit = true,
.shader_read_bit = true,
.depth_stencil_attachment_read_bit = true,
.transfer_read_bit = true,
.host_read_bit = true,
.memory_read_bit = true,
.transform_feedback_counter_read_bit_ext = true,
.conditional_rendering_read_bit_ext = true,
.color_attachment_read_noncoherent_bit_ext = true,
.acceleration_structure_read_bit_khr = true,
.fragment_density_map_read_bit_ext = true,
.fragment_shading_rate_attachment_read_bit_khr = true,
.command_preprocess_read_bit_nv = true,
};
const pm_init = ProfileMark("Command.init");
pub fn init(device: *Device, queue: queues.Queue, id: queues.QueueId) !void {
pm_init.start();
const pm_init = ProfileMark.init("Command.init");
pub fn init(device: *const Device, queue: *queues.Queue, id: queues.QueueId) !void {
try pm_init.begin();
defer pm_init.end();
assert(queue.handle);
assert(queue == queues.get(id));
assert(device.dev);
assert(!queue.cmd_pool);
queue.queue_id = id;
queue.cmd_pool = try Pool.init(queue.family, .reset_cmd_buffer_bit);
errdefer queue.cmd_pool.deinit();
queue.index = id;
queue.cmd_pool = try Pool.init(device, queue.family, .{ .reset_command_buffer_bit = true });
errdefer queue.cmd_pool.deinit(device);
queue.cmds = try vkd.allocateCommandbuffers(device.dev, .{
try device.dispatch.allocateCommandBuffers(device.handle, &.{
.s_type = .command_buffer_allocate_info,
.command_pool = queue.cmd_pool,
.command_pool = queue.cmd_pool.handle,
.level = .primary,
.command_buffer_count = queue.cmds.len,
});
.command_buffer_count = settings.cmds_per_queue,
}, &queue.cmds);
for (queue.cmds) |_, i| {
queue.cmd_fences[i] = try Fence.init(true);
queue.cmd_fences[i] = try Fence.init(device, true);
queue.cmd_ids[i] = 0;
}
}
const pm_deinit = ProfileMark("Command.deinit");
pub fn deinit(queue: *queues.Queue, device: *Device) void {
pm_init.start();
pub fn deinit(device: *const Device, queue: *const queues.Queue) void {
pm_init.begin() catch {};
defer pm_init.end();
assert(device.dev);
for (queue.cmd_fences) |fence| {
fence.deinit();
fence.deinit(device);
}
queue.cmd_pool.deinit();
queue.cmd_pool.deinit(device);
}
pub fn getHeadSubmit(id: queues.QueueId) SubmitId {
@ -153,8 +150,8 @@ pub const Pool = struct {
handle: vk.CommandPool,
pub fn init(device: *Device, family: vk.QueueFamilyProperties, flags: vk.CommandPoolCreateFlags) !Self {
const handle = try vkd.createCommandPool(device.dev, &.{
pub fn init(device: *const Device, family: u32, flags: vk.CommandPoolCreateFlags) !Self {
const handle = try device.dispatch.createCommandPool(device.handle, &.{
.s_type = .command_pool_create_info,
.flags = flags,
.queue_family_index = family,
@ -165,11 +162,11 @@ pub const Pool = struct {
};
}
pub fn deinit(self: *Self, device: *Device) void {
vkd.destroyCommandPool(device.dev, self.handle, null);
pub fn deinit(self: *const Self, device: *const Device) void {
device.dispatch.destroyCommandPool(device.handle, self.handle, null);
}
pub fn reset(self: *Self, device: *Device, flags: vk.CommandPoolResetFlagBits) !void {
try vkd.resetCommandPool(device.dev, self.handle, flags);
pub fn reset(self: *const Self, device: *const Device, flags: vk.CommandPoolResetFlagBits) !void {
try device.dispatch.resetCommandPool(device.handle, self.handle, flags);
}
};

View File

@ -1,20 +1,21 @@
const vk = @import("vulkan");
const CommandBuffer = @import("Command.zig").CommandBuffer;
const CommandBuffer = @import("Command.zig").Buffer;
const QueueId = @import("queues.zig").QueueId;
cur_cmd_buf: [QueueId.count]CommandBuffer,
prev_cmd_buf: [QueueId.count]CommandBuffer,
last_submit_queue: QueueId,
most_recent_begin: QueueId,
cur_cmd_buf: [QueueId.count]CommandBuffer = undefined,
prev_cmd_buf: [QueueId.count]CommandBuffer = undefined,
last_submit_queue: ?QueueId = null,
most_recent_begin: ?QueueId = null,
const Self = @This();
pub const context: Self = Self{};
pub var context: Self = Self{};
pub fn init() Self {
context.* = .{};
context = .{};
return context;
}
pub fn deinit() void {
context.* = .{};
context = .{};
}

View File

@ -5,34 +5,47 @@ const Instance = @import("instance.zig").Instance;
const Self = @This();
messenger: vk.DebugUtilsMessengerCreateInfoEXT,
messenger: if (settings.messenger_on) vk.DebugUtilsMessengerEXT else void,
pub fn init(instance: *Instance) !Self {
const self = Self{};
if (settings.messenger_on) {
self.messenger = try vk.CreateDebugUtilsCreateMessengerEXT(instance.vkInst, .{
const messenger = try instance.dispatch.createDebugUtilsMessengerEXT(instance.handle, &.{
.s_type = .debug_utils_messenger_create_info_ext,
.message_severity = .error_bit_ext | .warning_bit_ext | .info_bit_ext,
.message_type = .general_bit_ext | .validation_bit_ext | .performance_bit_ext,
.flags = .{},
.message_severity = .{
.error_bit_ext = true,
.warning_bit_ext = true,
.info_bit_ext = true,
},
.message_type = .{
.general_bit_ext = true,
.validation_bit_ext = true,
.performance_bit_ext = true,
},
.pfn_user_callback = onVulkanMessage,
.p_user_data = null,
}, null);
return Self{ .messenger = messenger };
} else {
return Self{};
}
return self;
}
pub fn deinit(self: *Self, instance: *Instance) void {
pub fn deinit(self: *const Self, instance: *const Instance) void {
if (settings.messenger_on) {
vk.destroyDebugUtilsMessengerEXT(instance.vkInst, self.messenger, null);
instance.dispatch.destroyDebugUtilsMessengerEXT(instance.handle, self.messenger, null);
}
}
fn onVulkanMessage(message_severity: vk.DebugUtilsMessengerSeverityFlagBitsEXT, message_type: vk.DebugUtilsMessageTypeFlagsEXT, callback_data: *const vk.DebugUtilsMessengerCallbackDataEXT, user_data: *anyopaque) bool {
fn onVulkanMessage(message_severity: u32, message_type: u32, callback_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, user_data: ?*anyopaque) callconv(.C) u32 {
//var shouldLog = false;
// TODO: log severity
_ = message_severity;
_ = message_type;
_ = user_data;
std.debug.print("vkr: {s}\n", callback_data.p_message);
if (callback_data) |data| {
std.debug.print("vkr: {s}\n", .{data.p_message});
}
return 0;
}

View File

@ -1,17 +1,17 @@
const std = @import("std");
const vk = @import("vulkan");
const settings = @import("settings");
const settings = @import("settings.zig");
pub const Device = struct {
khr_swapchain: bool,
ext_memory_budget: bool,
ext_hdr_metadata: bool,
khr_shader_float16_int8: bool,
khr_16bit_storage: bool,
khr_shader_float_16_int_8: bool,
khr_1_6bit_storage: bool,
khr_push_descriptor: bool,
ext_memory_priority: bool,
khr_bind_memory2: bool,
khr_bind_memory_2: bool,
khr_shader_float_controls: bool,
khr_spirv_1_4: bool,
ext_conditional_rendering: bool,
@ -25,13 +25,13 @@ pub const Device = struct {
return getExtensions(@This(), props);
}
pub fn toArray(self: *Device, allocator: std.mem.Allocator) []u8 {
return toArray(Device, self, allocator);
pub fn toArray(allocator: std.mem.Allocator) [][*:0]const u8 {
return extToArray(@This(), allocator);
}
};
pub const Instance = struct {
khr_get_physical_device_properties2: bool,
khr_get_physical_device_properties_2: bool,
ext_swapchain_colorspace: bool,
ext_debug_utils: if (settings.messenger_on) bool else void,
@ -39,28 +39,35 @@ pub const Instance = struct {
return getExtensions(@This(), props);
}
pub fn toArray(self: *Instance, allocator: std.mem.Allocator) []u8 {
return extToArray(Instance, self, allocator);
pub fn toArray(allocator: std.mem.Allocator) [][*:0]const u8 {
return extToArray(@This(), allocator);
}
};
fn getExtensions(comptime T: type, props: []vk.ExtensionProperties) T {
const ext: T = undefined;
for (@typeInfo(T).Struct.fields) |field| {
var ext: T = undefined;
inline for (@typeInfo(T).Struct.fields) |field| {
if (field.field_type == void) {
continue;
}
for (props) |prop| {
if (std.ascii.eqlIgnoreCase([*:0]u8, prop.extension_name, "vk_" ++ field.name)) {
const ext_name = std.mem.sliceTo(&prop.extension_name, 0);
if (std.ascii.eqlIgnoreCase(ext_name, "vk_" ++ field.name)) {
@field(ext, field.name) = true;
}
}
}
return ext;
}
pub fn extToArray(comptime T: type, allocator: std.mem.Allocator) []u8 {
const arr = allocator.alloc([]u8, @typeInfo(T).Struct.fields.len);
for (@typeInfo(T).Struct.fields) |field, i| {
const name_info: vk.extension_info.Info = @field(vk.extension_info, field.name);
pub fn extToArray(comptime T: type, allocator: std.mem.Allocator) [][*:0]const u8 {
const arr = allocator.alloc([*:0]const u8, @typeInfo(T).Struct.fields.len) catch unreachable;
comptime for (@typeInfo(T).Struct.fields) |field, i| {
const name_info = @field(vk.extension_info, field.name);
arr[i] = name_info.name;
}
};
return arr;
}

View File

@ -1,12 +1,13 @@
const std = @import("std");
const Base = @import("Base.zig");
const Instance = @import("instance.zig").Instance;
const Display = @import("display.zig").Display;
const Window = @import("display.zig").Window;
const Device = @import("device.zig").Device;
const Memory = @import("memory.zig").Memory;
const Framebuffer = @import("framebuffer.zig").Framebuffer;
const Swapchain = @import("Swapchain.zig");
const memory = @import("memory.zig");
const framebuffer = @import("framebuffer.zig");
const Swapchain = @import("swapchain.zig").Swapchain;
const Context = @import("Context.zig");
//const MainPass = @import("main_pass.zig").MainPass;
const Command = @import("Command.zig");
@ -14,8 +15,6 @@ const Command = @import("Command.zig");
instance: Instance,
window: Window,
device: Device,
memory: Memory,
framebuffer: Framebuffer,
swapchain: Swapchain,
context: Context,
// sampler: Sampler, TODO
@ -29,18 +28,32 @@ context: Context,
const Self = @This();
pub fn init() !Self {
const self = Self{};
errdefer {
std.debug.print("failed to init VulkanRenderer", .{});
}
errdefer std.debug.print("failed to init VulkanRenderer", .{});
errdefer self.deinit();
try Base.init();
try self.instance.init();
try windowInit(&self.window);
try self.device.init();
try self.memory.init();
try self.framebuffer.init();
try self.swapchain.init(null);
try self.context.init();
const instance = try Instance.init();
errdefer instance.deinit();
const window = try windowInit(&instance);
errdefer window.deinit(&instance);
const device = try Device.init(&instance, &window);
errdefer device.deinit();
try memory.init(&instance, &device);
errdefer memory.deinit(&device);
try framebuffer.init();
errdefer framebuffer.deinit(&device);
const swapchain = try Swapchain.init(&instance, &window, &device, null);
errdefer swapchain.deinit();
const context = Context.init();
errdefer context.deinit();
// try self.sampler.init();
// try self.texTable.init();
// try self.bindings.init();
@ -49,17 +62,23 @@ pub fn init() !Self {
// try self.imSys.init();
//try self.main_pass.init();
return self;
return Self{
.instance = instance,
.window = window,
.device = device,
.swapchain = swapchain,
.context = context,
};
}
pub fn update(self: Self) bool {
pub fn update(self: Self) void {
// TODO profiling
// base system update
{
self.swapchain.acquireSync();
try self.swapchain.acquireSync(&self.device);
self.swapchain.acquireImage();
self.memory.update();
memory.update();
}
// system update
@ -89,7 +108,7 @@ pub fn update(self: Self) bool {
}
pub fn deinit(self: Self) void {
self.device.waitIdle();
self.device.waitIdle() catch {};
// TODO: delete lightmap pack
@ -103,26 +122,27 @@ pub fn deinit(self: Self) void {
// self.sampler.deinit();
// clear other passes here.
self.memory.finalize();
memory.finalize(&self.device) catch {};
self.context.deinit();
self.swapchain.deinit();
self.framebuffer.deinit();
self.window.deinit();
self.memory.deinit();
Context.deinit();
self.swapchain.deinit(&self.device);
framebuffer.deinit(&self.device);
self.window.deinit(&self.instance);
memory.deinit(&self.device);
self.device.deinit();
self.instance.deinit();
}
fn windowInit(window: *Window) !void {
fn windowInit(instance: *const Instance) !Window {
// TODO: convar cv_fullscreen
const fullscreen = false;
const extents = try Display.getSize(fullscreen);
try window.init("efemra", extents, fullscreen);
const window = try Window.init(instance, "efemra", extents.width, extents.height, fullscreen);
// TODO: convar r_width/r_height set
// TODO: UISys_Init
return window;
}
fn windowDeinit(window: *Window) void {

View File

@ -1,256 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
const math = @import("std").math;
const settings = @import("settings.zig");
const dev = @import("device.zig");
const vkd = dev.DeviceDispatch;
const sync = @import("sync.zig");
const queues = @import("queues.zig");
const Image = @import("image.zig").Image;
// TODO memory
// NOTE(review): container-level GeneralPurposeAllocator used as the module
// allocator; `gpa.allocator()` is evaluated at container scope — confirm this
// compiles on the targeted Zig version.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
/// Present modes in order of preference; the first one the surface reports
/// support for is selected (see `Support.selectSwapMode`).
const preferred_present_modes = [_]vk.PresentModeKHR{
    .mailbox_khr,
    .immediate_khr,
    .fifo_relaxed_khr,
    .fifo_khr,
};
/// Swapchain surface formats in order of preference; the first entry the
/// surface supports is selected (see `Support.selectSwapFormat`). HDR entries
/// are only present when `settings.hdr_on` is enabled.
///
/// FIX: the original named a nonexistent type (`vk.PreferredSurfaceFormats`),
/// used positional tuples instead of `vk.SurfaceFormatKHR` field literals, and
/// placed `void` "elements" in the array when HDR was off — Zig array literals
/// cannot contain conditional elements, so the whole declaration is selected
/// on `settings.hdr_on` instead.
const preferred_surface_formats: []const vk.SurfaceFormatKHR = if (settings.hdr_on) &[_]vk.SurfaceFormatKHR{
    // PQ Rec2100
    // https://en.wikipedia.org/wiki/Rec._2100
    // https://en.wikipedia.org/wiki/High-dynamic-range_video#Perceptual_quantizer
    .{ .format = .a2r10g10b10_unorm_pack32, .color_space = .hdr10_st2084_ext },
    .{ .format = .a2b10g10r10_unorm_pack32, .color_space = .hdr10_st2084_ext },
    // 10-bit sRGB
    .{ .format = .a2r10g10b10_unorm_pack32, .color_space = .srgb_nonlinear_khr },
    .{ .format = .a2b10g10r10_unorm_pack32, .color_space = .srgb_nonlinear_khr },
    // 8 bit sRGB
    .{ .format = .r8g8b8a8_srgb, .color_space = .srgb_nonlinear_khr },
    .{ .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr },
} else &[_]vk.SurfaceFormatKHR{
    // 8 bit sRGB
    .{ .format = .r8g8b8a8_srgb, .color_space = .srgb_nonlinear_khr },
    .{ .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr },
};
/// Queried surface support for a (physical device, surface) pair: capabilities
/// plus the supported surface formats and present modes. Owns the `formats`
/// and `modes` slices; release with `deinit`.
const Support = struct {
    const Self = @This();

    caps: vk.SurfaceCapabilitiesKHR,
    format_count: u32,
    formats: []vk.SurfaceFormatKHR,
    mode_count: u32,
    modes: []vk.PresentModeKHR,

    /// Queries capabilities, formats, and present modes for `window.surface`.
    /// Caller must `deinit` the result.
    pub fn init(device: *dev.Device, window: *dev.Window) !Self {
        var self: Self = undefined;
        // FIX: was assigned to `self.capabilities`, which is not a field — the
        // declared field is `caps`.
        self.caps = try vkd.getPhysicalDeviceSurfaceCapabilitiesKHR(device.dev, window.surface);

        _ = try vkd.getPhysicalDeviceSurfaceFormatsKHR(device.dev, window.surface, &self.format_count, null);
        // FIX: allocation can fail — propagate with `try` (was missing).
        self.formats = try allocator.alloc(vk.SurfaceFormatKHR, self.format_count);
        errdefer allocator.free(self.formats);
        _ = try vkd.getPhysicalDeviceSurfaceFormatsKHR(device.dev, window.surface, &self.format_count, self.formats.ptr);

        // FIX: the count-query call was missing its final (null) output argument.
        _ = try vkd.getPhysicalDeviceSurfacePresentModesKHR(device.dev, window.surface, &self.mode_count, null);
        // FIX: the mode slice was sized with `format_count`, mis-sizing `modes`.
        self.modes = try allocator.alloc(vk.PresentModeKHR, self.mode_count);
        errdefer allocator.free(self.modes);
        _ = try vkd.getPhysicalDeviceSurfacePresentModesKHR(device.dev, window.surface, &self.mode_count, self.modes.ptr);

        return self;
    }

    pub fn deinit(self: *Self) void {
        allocator.free(self.formats);
        // FIX: `modes` is heap-allocated just like `formats` and was leaked here.
        allocator.free(self.modes);
    }

    /// Returns the most preferred supported surface format, falling back to
    /// the first reported one. Errors when the surface reports no formats.
    pub fn selectSwapFormat(self: *Self) !vk.SurfaceFormatKHR {
        // FIX: the emptiness check was inverted (`> 0` returned the error on
        // the good path).
        if (self.formats.len == 0) {
            return error.NoSurfaceFormats;
        }
        for (preferred_surface_formats) |preferred_format| {
            for (self.formats) |format| {
                // FIX: std.mem.eql compares slices of elements, not two struct
                // values — std.meta.eql is the field-wise comparison.
                if (std.meta.eql(format, preferred_format)) {
                    return format;
                }
            }
        }
        return self.formats[0];
    }

    /// Returns the most preferred supported present mode, falling back to the
    /// first reported one. Errors when the surface reports no modes.
    pub fn selectSwapMode(self: *Self) !vk.PresentModeKHR {
        if (self.modes.len == 0) {
            return error.NoPresentModes;
        }
        for (preferred_present_modes) |preferred_mode| {
            for (self.modes) |mode| {
                if (preferred_mode == mode) {
                    return mode;
                }
            }
        }
        return self.modes[0];
    }

    /// Chooses the swap extent: the surface's fixed extent when it reports
    /// one, otherwise the window size clamped to the surface's min/max bounds.
    pub fn selectSwapExtent(self: *Self, window_width: i32, window_height: i32) vk.Extent2D {
        // FIX: field is `caps` (not `capabilities`), and the Vulkan "no fixed
        // extent" sentinel is maxInt(u32); `~0` is a comptime_int and does not
        // compare equal to it.
        if (self.caps.current_extent.width != math.maxInt(u32)) {
            return self.caps.current_extent;
        }
        const min_ext = self.caps.min_image_extent;
        const max_ext = self.caps.max_image_extent;
        // NOTE(review): assumes the window dimensions are non-negative; a
        // negative value would trip the @intCast safety check.
        return vk.Extent2D{
            .width = math.clamp(@intCast(u32, window_width), min_ext.width, max_ext.width),
            .height = math.clamp(@intCast(u32, window_height), min_ext.height, max_ext.height),
        };
    }
};
/// Identifies one queue submission so in-flight resources can be tracked
/// against GPU completion.
/// NOTE(review): u32 + u4 + bool packs to 37 bits, so the struct is not
/// byte-round — confirm the layout/ABI is intentional.
pub const SubmitId = packed struct {
    // Per-queue submission counter.
    counter: u32,
    // Index of the owning queue (see queues.QueueId).
    queue_id: u4,
    // False for a zero/default SubmitId that names no real submission.
    valid: bool,
};
/// Vulkan swapchain plus the per-image and per-resource-set synchronization
/// state needed to pace rendering against presentation.
///
/// NOTE(review): this body contains numerous compile errors (flagged inline);
/// it appears to be mid-rewrite. The code is left untouched here and only
/// annotated.
pub const Swapchain = struct {
    const Self = @This();
    handle: vk.SwapchainKHR,
    color_format: vk.Format,
    color_space: vk.ColorSpaceKHR,
    mode: vk.PresentModeKHR,
    width: i32,
    height: i32,
    length: i32,
    // Index of the currently acquired swapchain image.
    image_index: u32,
    image_submits: [settings.max_swapchain_len]SubmitId,
    images: [settings.max_swapchain_len]Image,
    sync_index: u32,
    sync_submits: [settings.resource_sets]SubmitId,
    available_semas: [settings.resource_sets]vk.Semaphore,
    rendered_semas: [settings.resource_sets]vk.Semaphore,
    // NOTE(review): declared `!void` but the body builds and returns `self` —
    // the return type presumably should be `!Self`.
    pub fn init(window: *dev.Window, device: *dev.Device, previous: ?*Self) !void {
        // NOTE(review): passes the `dev` module and a surface, but
        // `Support.init` takes (*dev.Device, *dev.Window).
        const support = try Support.init(dev, window.surface);
        defer support.deinit();
        const queue_support = try queues.QueueSupport.init(device, window);
        defer queue_support.deinit();
        const format = try support.selectSwapFormat();
        const mode = try support.selectSwapMode();
        // NOTE(review): selectSwapExtent takes (width, height), not a window.
        const ext = support.selectSwapExtent(window);
        // NOTE(review): `support.capabilities` — the Support field is `caps`.
        const img_count = math.clamp(settings.desired_swapchain_len, support.capabilities.min_image_count, math.min(settings.max_swapchain_len, support.capabilities.max_image_count));
        // Queue families that need access to the swapchain images.
        const families = [_]u32{
            queue_support.family[queues.QueueId.graphics],
            queue_support.family[queues.QueueId.present],
        };
        // Images must be shared only when graphics and present differ.
        const concurrent = families[0] != families[1];
        // NOTE(review): `vk.ImageUsage` / bare `.color_attachment_bit` — in
        // vulkan-zig this is `vk.ImageUsageFlags` with `.{ .field = true }`.
        const usage: vk.ImageUsage = .color_attachment_bit;
        // NOTE(review): `vk.SwapInfo` — presumably `vk.SwapchainCreateInfoKHR`.
        const swap_info = vk.SwapInfo{
            .s_type = .swapchain_create_info_khr,
            .surface = window.surface,
            .present_mode = mode,
            .min_image_count = img_count,
            .image_format = format.format,
            .image_color_space = format.color_space,
            .image_extent = ext,
            .image_array_layers = 1,
            .image_usage = usage,
            .image_sharing_mode = if (concurrent) .concurrent else .exclusive,
            .queue_family_index_count = if (concurrent) families.len else 0,
            // NOTE(review): expects a pointer; `&families` or `families[0..].ptr`.
            .p_queue_family_indices = families,
            .pre_transform = support.capabilities.current_transform,
            // no compositing with window manager / desktop background
            .composite_alpha = .opaque_bit_khr,
            // don't render pixels behind other windows
            .clipped = true,
            // prev swapchain, if recreating
            // NOTE(review): `previous` is `?*Self`; `orelse` unwraps the
            // optional itself — this should be `if (previous) |p| p.handle
            // else .null_handle` (or similar).
            .old_swapchain = previous.handle orelse null,
        };
        const handle = try vkd.createSwapchainKHR(device.dev, &swap_info, null);
        if (previous == null) {
            // NOTE(review): std.debug.print takes a tuple of args — each call
            // below is missing the `.{ ... }` wrapper.
            std.debug.print("Present mode: '{s}'", @tagName(mode));
            std.debug.print("Present extent: '{} x {}'", ext.width, ext.height);
            std.debug.print("Present images: '{}'", img_count);
            std.debug.print("Present sharing mode: '{s}'", if (concurrent) "Concurrent" else "Exclusive");
            std.debug.print("Color space: '{s}'", @tagName(format.color_space));
            std.debug.print("Format: '{s}'", @tagName(format.format));
        }
        // NOTE(review): `vk.Images` is not a type, and `img_count` is `const`
        // yet passed as `&img_count` below.
        const images = [settings.max_swapchain_len]vk.Images{};
        try vkd.getSwapchainImagesKHR(device.dev, handle, &img_count, null);
        if (img_count > settings.max_swapchain_len) {
            std.debug.assert(false);
            vkd.destroySwapchainKHR(device.dev, handle, null) catch {};
            return error.TooManyImages;
        }
        try vkd.getSwapchainImagesKHR(device.dev, handle, &img_count, images);
        // NOTE(review): `self` is `const` but its arrays are mutated below;
        // also several struct fields are left uninitialized here.
        const self = Self{
            .handle = handle,
            .mode = mode,
            .color_format = format.format,
            .color_space = format.color_space,
            .width = ext.width,
            .height = ext.height,
            .length = img_count,
        };
        // Wrap each raw swapchain image in this renderer's Image type.
        for (images) |img, i| {
            try Image.import(&self.images[i], &.{
                .s_type = .image_create_info,
                .image_type = .@"2d",
                .format = self.color_format,
                .extent = .{
                    .width = ext.width,
                    .height = ext.height,
                    .depth = 1,
                },
                .mip_levels = 1,
                .array_layers = 1,
                .samples = .@"1_bit",
                .tiling = .optimal,
                // NOTE(review): `usage` expects usage flags, but this passes
                // the sharing mode.
                .usage = swap_info.image_sharing_mode,
                .queue_family_index_count = swap_info.queue_family_index_count,
                .p_queue_family_indices = swap_info.p_queue_family_indices,
                .initial_layout = .undefined,
            }, img);
        }
        {
            // NOTE(review): `var i = 0` infers comptime_int; needs an explicit
            // runtime type such as `var i: usize = 0`.
            var i = 0;
            while (i < settings.resource_sets) : (i += 1) {
                self.available_semas[i] = try sync.Semaphore.init();
                self.rendered_semas[i] = try sync.Semaphore.init();
            }
        }
        return self;
    }
    /// Waits for the device to go idle, then destroys the per-set semaphores
    /// and the imported images.
    /// NOTE(review): the swapchain `handle` itself is never destroyed here.
    pub fn deinit(self: *Self, device: *dev.Device) void {
        // NOTE(review): waitIdle returns an error union that is discarded.
        device.waitIdle();
        for (self.available_semas) |_, i| {
            self.available_semas[i].deinit();
            self.rendered_semas[i].deinit();
        }
        for (self.images) |image| {
            image.deinit();
        }
    }
};

View File

@ -1,5 +0,0 @@
//! Minimal base dispatch wrapper (instance creation only).
//! NOTE(review): appears superseded by the fuller BaseDispatch in Base.zig —
//! confirm nothing still references this declaration.
const vk = @import("vulkan");
const BaseDispatch = vk.BaseWrapper(.{
    .createInstance = true,
});

View File

@ -7,16 +7,15 @@ const OnlyIf = settings.OnlyIf;
const Extensions = @import("Extensions.zig");
const Instance = @import("instance.zig").Instance;
const layers = @import("layers.zig").layers;
const enabled_layers = @import("layers.zig").enabled;
const Renderer = @import("Renderer.zig");
const Window = @import("device.zig").Window;
const Window = @import("display.zig").Window;
const queues = @import("queues.zig");
const vki = @import("instance.zig").InstanceDispatch;
const std = @import("std");
// TODO memory
const gpa = std.heap.GeneralPurposeAllocator(.{}){};
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
const DeviceDispatch = vk.DeviceWrapper(.{
@ -54,6 +53,7 @@ const DeviceDispatch = vk.DeviceWrapper(.{
.destroyFramebuffer = true,
.beginCommandBuffer = true,
.endCommandBuffer = true,
.resetCommandBuffer = true,
.allocateMemory = true,
.freeMemory = true,
.createBuffer = true,
@ -70,55 +70,58 @@ const DeviceDispatch = vk.DeviceWrapper(.{
.cmdSetScissor = true,
.cmdBindVertexBuffers = true,
.cmdCopyBuffer = true,
.getFenceStatus = true,
});
const vkd = DeviceDispatch;
const Props = struct {
phdev: vk.PhysicalDeviceProperties2,
accstr: OnlyIf(settings.rt_on, vk.PhysicalDeviceAccelerationStructurePropertiesKHR),
rtpipe: OnlyIf(settings.rt_on, vk.PhysicalDeviceRaytracingPipelinePropertiesKHR),
rtpipe: OnlyIf(settings.rt_on, vk.PhysicalDeviceRayTracingPipelinePropertiesKHR),
pub fn getName(self: *Props) []u8 {
return self.phdev.device_name;
return std.mem.sliceTo(&self.phdev.properties.device_name, 0);
}
};
const Features = struct {
phdev: vk.PhysicalDeviceFeatures2,
accstr: OnlyIf(settings.rt_on, vk.PhysicalDeviceAccelerationStructureFeaturesKHR),
rtpipe: OnlyIf(settings.rt_on, vk.PhysicalDeviceRaytracingPipelineFeaturesKHR),
rquery: OnlyIf(settings.rt_on, vk.PhysicalDeviceRayQueryFeaturesKHR),
accstr: OnlyIf(settings.rt_on, vk.PhysicalDeviceAccelerationStructureFeaturesKHR) = undefined,
rtpipe: OnlyIf(settings.rt_on, vk.PhysicalDeviceRayTracingPipelineFeaturesKHR) = undefined,
rquery: OnlyIf(settings.rt_on, vk.PhysicalDeviceRayQueryFeaturesKHR) = undefined,
};
const Device = struct {
pub const Device = struct {
const Self = @This();
dev: vk.Device,
phdev: vk.PhysicalDevice,
props: Props,
exts: Extensions.Device,
feats: Features,
physical_device: vk.PhysicalDevice = undefined,
handle: vk.Device = undefined,
dispatch: DeviceDispatch = undefined,
props: Props = undefined,
exts: Extensions.Device = undefined,
feats: Features = undefined,
pub fn init(renderer: *Renderer) !void {
const self = Self{};
pub fn init(instance: *const Instance, window: *const Window) !Self {
var self = Self{};
try selectPhysicalDevice(&self, instance, window);
try self.createDevice(renderer);
errdefer vkd.destroyDevice(self.dev, null);
try self.createDevice(window, instance);
self.dispatch = try DeviceDispatch.load(self.handle, instance.dispatch.dispatch.vkGetDeviceProcAddr);
errdefer self.dispatch.destroyDevice(self.handle, null);
// TODO: volk init?
try queues.init();
try queues.init(instance, &self, window);
return self;
}
pub fn deinit(self: *Self) void {
queues.deinit();
if (self.dev) {
vkd.destroyDevice(self.dev, null);
}
pub fn deinit(self: *const Self) void {
queues.deinit(self);
self.dispatch.destroyDevice(self.handle, null);
}
pub fn waitIdle(self: *Self) !void {
try self.dev.waitIdle();
pub fn waitIdle(self: *const Self) !void {
try self.dispatch.deviceWaitIdle(self.handle);
}
pub fn getName(self: *Self) ![]u8 {
@ -134,37 +137,47 @@ const Device = struct {
has_queue_support: bool,
};
fn selectPhysicalDevice(self: *Device, instance: *Instance) !void {
const device_count: u32 = undefined;
_ = try vki.enumeratePhysicalDevices(instance.vkInst, &device_count, null);
fn selectPhysicalDevice(self: *Device, instance: *const Instance, window: *const Window) !void {
var device_count: u32 = undefined;
_ = try instance.dispatch.enumeratePhysicalDevices(instance.handle, &device_count, null);
const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count);
defer allocator.free(pdevs);
_ = try vki.enumeratePhysicalDevices(instance.vkInst, &device_count, pdevs.ptr);
_ = try instance.dispatch.enumeratePhysicalDevices(instance.handle, &device_count, pdevs.ptr);
const prop_list = try allocator.alloc(Props, device_count);
defer allocator.free(prop_list);
for (prop_list) |prop, i| {
for (prop_list) |*prop, i| {
prop.* = .{
.p_next = &prop.accstr,
.accstr = .{
.s_type = .physical_device_acceleration_structure_properties_khr,
.p_next = &prop.rtpipe,
},
.rtpipe = .{
.s_type = .physical_device_ray_tracing_pipeline_properties_khr,
.phdev = .{
.s_type = .physical_device_properties_2,
.properties = undefined,
},
.accstr = undefined,
.rtpipe = undefined,
};
vkd.getPhysicalDeviceProperties2(pdevs[i], &prop.phdev);
if (settings.rt_on) {
prop.phdev.p_next = prop.accstr;
prop.accstr = .{
.s_type = .physical_device_acceleration_structure_properties_khr,
.p_next = &prop.rtpipe,
};
prop.rtpipe = .{
.s_type = .physical_device_ray_tracing_pipeline_properties_khr,
};
}
instance.dispatch.getPhysicalDeviceProperties2(pdevs[i], &prop.phdev);
}
const feats_list = allocator.alloc(Features, device_count);
const feats_list = try allocator.alloc(Features, device_count);
defer allocator.free(feats_list);
for (feats_list) |feat, i| {
for (feats_list) |*feat, i| {
feat.phdev.s_type = .physical_device_features_2;
if (settings.rt_on) {
feat.phdev.p_next = &feat.accstr;
@ -175,23 +188,23 @@ const Device = struct {
feat.rquery.s_type = .physical_device_ray_query_features_khr;
}
vkd.getPhysicalDeviceFeatures2(pdevs[i], &feat.phdev);
instance.dispatch.getPhysicalDeviceFeatures2(pdevs[i], &feat.phdev);
}
const exts_list = allocator.alloc(Extensions.dev_s, device_count);
const exts_list = try allocator.alloc(Extensions.Device, device_count);
defer allocator.free(exts_list);
for (exts_list) |ext, i| {
const count: u32 = undefined;
vkd.enumerateDeviceExtensionProperties(pdevs[i], null, &count, null);
const props = allocator.alloc(vk.ExtensionProperties, count);
for (exts_list) |*ext, i| {
var count: u32 = undefined;
_ = try instance.dispatch.enumerateDeviceExtensionProperties(pdevs[i], null, &count, null);
const props = try allocator.alloc(vk.ExtensionProperties, count);
defer allocator.free(props);
vkd.enumerateDeviceExtensionProperties(pdevs[i], null, &count, props.ptr);
_ = try instance.dispatch.enumerateDeviceExtensionProperties(pdevs[i], null, &count, props.ptr);
ext.* = Extensions.Device.get(props);
}
const scores_list = allocator.alloc(DeviceScore, device_count);
const scores_list = try allocator.alloc(DeviceScore, device_count);
defer allocator.free(scores_list);
for (scores_list) |_, i| {
@ -201,17 +214,19 @@ const Device = struct {
scores_list[i].feat_score = featsEval(&feats_list[i]);
scores_list[i].prop_score = propsEval(&prop_list[i]);
const queue_support = try queues.QueueSupport.init(&pdevs[i]);
const queue_support = try queues.Support.init(instance, pdevs[i], window);
defer queue_support.deinit();
var has_queue_support = true;
for (queue_support.families) |family| {
has_queue_support &= family >= 0;
if (family == null) {
has_queue_support = false;
}
}
scores_list[i].has_queue_support = has_queue_support;
}
var chosen_dev: ?i32 = null;
var chosen_dev: ?usize = null;
for (scores_list) |score, i| {
if (!score.has_required_exts) {
continue;
@ -251,84 +266,87 @@ const Device = struct {
self.props = prop_list[chosen];
self.feats = feats_list[chosen];
self.exts = exts_list[chosen];
self.dev = pdevs[chosen];
self.physical_device = pdevs[chosen];
} else unreachable;
}
fn hasRequired(exts: Extensions) bool {
fn hasRequired(exts: *Extensions.Device) bool {
var has_all = false;
has_all &= exts.khr_swapchain;
has_all = has_all and exts.khr_swapchain;
return has_all;
}
fn rtEval(exts: *Extensions) i32 {
var score = 0;
score += if (exts.khr_acceleration_structure and exts.khr_ray_tracing_pipeline) 1 else 0;
score += if (exts.khr_ray_query) 1 else 0;
fn rtEval(exts: *Extensions.Device) i32 {
var score: i32 = 0;
if (settings.rt_on) {
score += if (exts.khr_acceleration_structure and exts.khr_ray_tracing_pipeline) 1 else 0;
score += if (exts.khr_ray_query) 1 else 0;
}
return score;
}
fn extsEval(exts: *Extensions) i32 {
var score = 0;
score += if (exts.ext_memory_budget) 1 else 0;
score += if (exts.ext_hdr_metadata) 1 else 0;
score += if (exts.khr_shader_float16_int8) 1 else 0;
score += if (exts.khr_16bit_storage) 1 else 0;
score += if (exts.khr_push_descriptor) 1 else 0;
score += if (exts.ext_memory_priority) 1 else 0;
score += if (exts.khr_bind_memory2) 1 else 0;
score += if (exts.shader_float_controls) 1 else 0;
score += if (exts.spirv_1_4) 1 else 0;
score += if (exts.conditional_rendering) 1 else 0;
score += if (exts.draw_indirect_count) 1 else 0;
fn extsEval(exts: *Extensions.Device) i32 {
var score: i32 = 0;
// https://github.com/ziglang/zig/issues/137
score += if (exts.ext_memory_budget) @as(i32, 1) else 0;
score += if (exts.ext_hdr_metadata) @as(i32, 1) else 0;
score += if (exts.khr_shader_float_16_int_8) @as(i32, 1) else 0;
score += if (exts.khr_1_6bit_storage) @as(i32, 1) else 0;
score += if (exts.khr_push_descriptor) @as(i32, 1) else 0;
score += if (exts.ext_memory_priority) @as(i32, 1) else 0;
score += if (exts.khr_bind_memory_2) @as(i32, 1) else 0;
score += if (exts.khr_shader_float_controls) @as(i32, 1) else 0;
score += if (exts.khr_spirv_1_4) @as(i32, 1) else 0;
score += if (exts.ext_conditional_rendering) @as(i32, 1) else 0;
score += if (exts.khr_draw_indirect_count) @as(i32, 1) else 0;
return score;
}
fn featsEval(feats: *Features) i32 {
var score = 0;
var score: i32 = 0;
// ------------------------------------------------------------------------
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceFeatures.html
// highly useful things
score += if (feats.phdev.features.full_draw_index_uint_32) 16 else 0;
score += if (feats.phdev.features.sampler_anisotropy) 16 else 0;
score += if (feats.phdev.features.texture_compression_bc) 16 else 0;
score += if (feats.phdev.features.independent_blend) 16 else 0;
score += if (feats.phdev.features.full_draw_index_uint_32 != 0) @as(i32, 16) else 0;
score += if (feats.phdev.features.sampler_anisotropy != 0) @as(i32, 16) else 0;
score += if (feats.phdev.features.texture_compression_bc != 0) @as(i32, 16) else 0;
score += if (feats.phdev.features.independent_blend != 0) @as(i32, 16) else 0;
// debug drawing
score += if (feats.phdev.features.fill_mode_non_solid) 2 else 0;
score += if (feats.phdev.features.wide_lines) 2 else 0;
score += if (feats.phdev.features.large_points) 2 else 0;
score += if (feats.phdev.features.fill_mode_non_solid != 0) @as(i32, 2) else 0;
score += if (feats.phdev.features.wide_lines != 0) @as(i32, 2) else 0;
score += if (feats.phdev.features.large_points != 0) @as(i32, 2) else 0;
// profiling
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkQueryPipelineStatisticFlagBits.html
score += if (feats.phdev.features.pipeline_statistics_query) 2 else 0;
score += if (feats.phdev.features.pipeline_statistics_query != 0) @as(i32, 2) else 0;
// shader features
score += if (feats.phdev.features.fragment_stores_and_atomics) 4 else 0;
score += if (feats.phdev.features.shader_int_64) 4 else 0;
score += if (feats.phdev.features.shader_int_16) 1 else 0;
score += if (feats.phdev.features.shader_storage_image_extended_formats) 4 else 0;
score += if (feats.phdev.features.fragment_stores_and_atomics != 0) @as(i32, 4) else 0;
score += if (feats.phdev.features.shader_int_64 != 0) @as(i32, 4) else 0;
score += if (feats.phdev.features.shader_int_16 != 0) @as(i32, 1) else 0;
score += if (feats.phdev.features.shader_storage_image_extended_formats != 0) @as(i32, 4) else 0;
// dynamic indexing
score += if (feats.phdev.features.shader_uniform_buffer_array_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.shader_storage_buffer_array_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.shader_sampled_imageArray_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.shader_storage_imageArray_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.image_cube_array) 1 else 0;
score += if (feats.phdev.features.shader_uniform_buffer_array_dynamic_indexing != 0) @as(i32, 4) else 0;
score += if (feats.phdev.features.shader_storage_buffer_array_dynamic_indexing != 0) @as(i32, 4) else 0;
score += if (feats.phdev.features.shader_sampled_image_array_dynamic_indexing != 0) @as(i32, 4) else 0;
score += if (feats.phdev.features.shader_storage_image_array_dynamic_indexing != 0) @as(i32, 4) else 0;
score += if (feats.phdev.features.image_cube_array != 0) @as(i32, 1) else 0;
// indirect and conditional rendering
score += if (feats.phdev.features.full_draw_index_uint_32) 1 else 0;
score += if (feats.phdev.features.multi_draw_indirect) 1 else 0;
score += if (feats.phdev.features.draw_indirect_first_instance) 1 else 0;
score += if (feats.phdev.features.full_draw_index_uint_32 != 0) @as(i32, 1) else 0;
score += if (feats.phdev.features.multi_draw_indirect != 0) @as(i32, 1) else 0;
score += if (feats.phdev.features.draw_indirect_first_instance != 0) @as(i32, 1) else 0;
if (settings.rt_on) {
// ------------------------------------------------------------------------
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceAccelerationStructureFeaturesKHR.html
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#acceleration-structure
score += if (feats.accstr.acceleration_structure) 64 else 0;
score += if (feats.accstr.acceleration_structure != 0) @as(i32, 64) else 0;
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#vkCmdBuildAccelerationStructuresIndirectKHR
//score += if (feats.accstr.accelerationStructureIndirectBuild) 16 else 0;
@ -343,7 +361,7 @@ const Device = struct {
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceRayTracingPipelineFeaturesKHR.html
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#ray-tracing
score += if (feats.rtpipe.ray_tracing_pipeline) 64 else 0;
score += if (feats.rtpipe.ray_tracing_pipeline != 0) @as(i32, 64) else 0;
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#vkCmdTraceRaysIndirectKHR
//score += if (feats.rtpipe.rayTracingPipelineTraceRaysIndirect) 16 else 0;
@ -355,15 +373,15 @@ const Device = struct {
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceRayQueryFeaturesKHR.html
// https://github.com/KhronosGroup/SPIRV-Registry/blob/master/extensions/KHR/SPV_KHR_ray_query.asciidoc
score += if (feats.rquery.ray_query) 64 else 0;
score += if (feats.rquery.ray_query != 0) @as(i32, 64) else 0;
}
return score;
}
fn propsEval(props: *Props) i32 {
var score = 0;
score += limitsEval(props.limits);
var score: i32 = 0;
score += limitsEval(&props.phdev.properties.limits);
if (settings.rt_on) {
score += accStrEval(props.accstr);
@ -373,7 +391,7 @@ const Device = struct {
}
fn limitsEval(lims: *const vk.PhysicalDeviceLimits) i32 {
var score = 0;
var score: u32 = 0;
score += log2(lims.max_image_dimension_1d);
score += log2(lims.max_image_dimension_2d);
score += log2(lims.max_image_dimension_3d);
@ -396,11 +414,11 @@ const Device = struct {
score += log2(lims.max_framebuffer_width);
score += log2(lims.max_framebuffer_height);
score += log2(lims.max_color_attachments);
return score;
return @intCast(i32, score);
}
fn accStrEval(accstr: *const vk.PhysicalDeviceAccelerationStructurePropertiesKHR) i32 {
var score = 0;
fn accStrEval(accstr: *const vk.PhysicalDeviceAccelerationStructurePropertiesKHR) u32 {
var score: u32 = 0;
score += log2(accstr.max_geometry_count);
score += log2(accstr.max_instance_count);
score += log2(accstr.max_primitive_count);
@ -410,45 +428,47 @@ const Device = struct {
return score;
}
fn rtPipeEval(rtpipe: *const vk.PhysicalDeviceRaytracingPipelineFeaturesKHR) i32 {
var score = 0;
fn rtPipeEval(rtpipe: *const vk.PhysicalDeviceRayTracingPipelineFeaturesKHR) u32 {
var score: u32 = 0;
score += log2(rtpipe.max_ray_recursion_depth);
score += log2(rtpipe.max_ray_dispatch_invocation_count);
score += log2(rtpipe.mayx_ray_hit_attribute_size);
return score;
}
fn createDevice(self: *Self, renderer: *Renderer) !void {
const surface = renderer.window.surface;
const queue_support = try queues.QueueSupport.init(surface);
fn createDevice(self: *Self, window: *const Window, instance: *const Instance) !void {
const queue_support = try queues.Support.init(instance, self.physical_device, window);
defer queue_support.deinit();
const families = std.ArrayHashMap(i32, i32).init(allocator);
var families = std.AutoArrayHashMap(u32, i32).init(allocator);
defer families.deinit();
for (queue_support.families) |family| {
const result = families.getOrPutAssumeCapacity(family);
result.value_ptr.* = if (result.found_existing) result.value_ptr.* + 1 else 1;
for (queue_support.families) |f| {
if (f) |family| {
const result = families.getOrPutAssumeCapacity(family);
result.value_ptr.* = if (result.found_existing) result.value_ptr.* + 1 else 1;
}
}
const priorities = [_]f32{ 1.0, 1.0, 1.0, 1.0 };
const queue_infos = [queues.QueueId.count]vk.DeviceQueueCreateInfo{};
const it = families.iterator();
var queue_infos: [queues.QueueId.count]vk.DeviceQueueCreateInfo = undefined;
var it = families.iterator();
{
var i: usize = 0;
while (it.next()) |entry| : (i += 1) {
queue_infos[i] = .{
.s_type = .device_queue_create_info,
.queue_family_index = entry.key_ptr.*,
.queue_count = entry.value_ptr.*,
.p_queue_priorities = priorities,
.flags = .{},
.queue_family_index = @intCast(u32, entry.key_ptr.*),
.queue_count = @intCast(u32, entry.value_ptr.*),
.p_queue_priorities = &priorities,
};
}
}
const ph_feats = &self.props.phdev_features;
const ph_feats = self.feats.phdev.features;
self.props = .{
self.feats = .{
.phdev = .{
.s_type = .physical_device_features_2,
.features = .{
@ -494,24 +514,25 @@ const Device = struct {
};
}
if (self.exts.khr_ray_tracing_pipeline) {
if (settings.rt_on) {
self.props.phdev.p_next = &self.props.accstr;
self.props.accstr.p_next = &self.props.rtpipe;
self.props.rtpipe.p_next = &self.props.rquery;
}
const ext_arr = self.exts.toArray(allocator);
const ext_arr = Extensions.Device.toArray(allocator);
defer allocator.free(ext_arr);
self.dev = try vki.createDevice(self.phdev, &.{
_ = try instance.dispatch.createDevice(self.physical_device, &.{
.flags = .{},
.p_next = &self.props.phdev,
.queue_create_info_count = families.count(),
.p_queue_create_infos = queue_infos,
.enabled_layer_count = layers.len,
.p_enabled_layer_names = layers,
.enabled_extension_count = ext_arr.len,
.p_enabled_extension_names = ext_arr.ptr,
.queue_create_info_count = @intCast(u32, families.count()),
.p_queue_create_infos = &queue_infos,
.enabled_layer_count = @intCast(u32, enabled_layers.len),
.pp_enabled_layer_names = &enabled_layers,
.p_enabled_features = null,
.enabled_extension_count = @intCast(u32, ext_arr.len),
.pp_enabled_extension_names = ext_arr.ptr,
}, null);
}

View File

@ -1,40 +1,45 @@
const std = @import("std");
const glfw = @import("glfw");
const vk = @import("vulkan");
const Renderer = @import("Renderer.zig");
const Instance = @import("instance.zig").Instance;
const Display = struct {
pub fn getWorkSize() !vk.Extent2D {
// TODO memory
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
pub const Display = struct {
pub fn getWorkSize() !glfw.Window.Size {
try glfw.init(.{});
const monitor = try glfw.getPoop();
const work_area = try monitor.getWorkArea();
const monitor = try glfw.Monitor.getPrimary() orelse error.NoPrimaryMonitor;
const work_area = try monitor.getWorkarea();
if (work_area.width <= 0 or work_area.height <= 0) {
return error.WorkAreaNotReady;
return error.WorkareaNotReady;
}
return .{
return glfw.Window.Size{
.width = work_area.width,
.height = work_area.height,
};
}
pub fn getFullSize() !glfw.Size {
pub fn getFullSize() !glfw.Window.Size {
try glfw.init(.{});
const monitor = try glfw.getPrimaryMonitor();
const modeCount: u32 = undefined;
const modes = try monitor.getVideoModes(monitor, &modeCount);
if (modeCount <= 0) {
const monitor = try glfw.Monitor.getPrimary() orelse error.NoPrimaryMonitor;
const modes = try monitor.getVideoModes(allocator);
defer allocator.free(modes);
if (modes.len <= 0) {
return error.NoModes;
}
var chosenArea: u32 = undefined;
var chosenMode: *glfw.VideoMode = undefined;
var chosenArea: ?u32 = null;
var chosenMode: ?*const glfw.VideoMode = null;
for (modes) |mode| {
const area = mode.width * mode.height;
if (area > chosenArea) {
const area = mode.getWidth() * mode.getHeight();
if (area > chosenArea orelse 0) {
chosenArea = area;
chosenMode = &mode;
}
@ -44,54 +49,50 @@ const Display = struct {
return error.NoModes;
}
return .{
.width = chosenMode.width,
.height = chosenMode.height,
return glfw.Window.Size{
.width = (chosenMode orelse unreachable).getWidth(),
.height = (chosenMode orelse unreachable).getHeight(),
};
}
pub fn getSize(fullscreen: bool) !glfw.Size {
pub fn getSize(fullscreen: bool) !glfw.Window.Size {
return if (fullscreen) try getFullSize() else try getWorkSize();
}
};
const Window = struct {
pub const Window = struct {
const Self = @This();
fullscreen: bool,
handle: glfw.Window,
size: glfw.Size,
size: glfw.Window.Size,
surface: vk.SurfaceKHR,
pub fn init(instance: *Instance, title: [][*:0]u8, width: u32, height: u32, fullscreen: bool) !void {
pub fn init(instance: *const Instance, title: [*:0]const u8, width: u32, height: u32, fullscreen: bool) !Self {
try glfw.init(.{});
const self = Self{
.fullscreen = fullscreen,
};
self.handle = try glfw.Window.create(width, height, title, glfw.getPrimaryMonitor(), null, .{
const handle = try glfw.Window.create(width, height, title, try glfw.Monitor.getPrimary() orelse error.NoPrimaryMonitor, null, .{
.client_api = .no_api,
.srgb_capable = true,
.auto_iconify = !fullscreen,
.maximized = !fullscreen,
});
errdefer self.deinit();
errdefer handle.destroy();
// TODO: input: register window
var surface: vk.SurfaceKHR = undefined;
_ = try glfw.createWindowSurface(instance.handle, handle, null, &surface);
self.size = try self.handle.getFramebufferSize();
self.surface = try glfw.createWindowSurface(instance.vkInst);
return self;
return Self{
.fullscreen = fullscreen,
.handle = handle,
.size = try handle.getFramebufferSize(),
.surface = surface,
};
}
pub fn deinit(self: *Self, instance: *Instance) void {
if (self.surface) {
self.vki.destroySurfaceKHR(instance.vkInst, self.surface, null);
}
if (self.handle) {
self.handle.destroy();
}
pub fn deinit(self: *const Self, instance: *const Instance) void {
instance.dispatch.destroySurfaceKHR(instance.handle, self.surface, null);
self.handle.destroy();
}
pub fn updateSize(self: *Self) !bool {

View File

@ -1,6 +1,5 @@
const std = @import("std");
const vk = @import("vulkan");
const vkd = @import("device.zig").DeviceDispatch;
const Image = @import("image.zig").Image;
const renderpass = @import("render_pass.zig");
const Device = @import("device.zig").Device;
@ -24,21 +23,17 @@ const Value = struct {
const Self = @This();
const HashMap = std.HashMap(Key, Value);
const s_fbufs = undefined;
const FramebufHashMap = std.AutoArrayHashMap(Key, Value);
var s_fbufs: FramebufHashMap = undefined;
pub fn init() !void {
s_fbufs = try HashMap.init(allocator);
s_fbufs = FramebufHashMap.init(allocator);
try s_fbufs.ensureTotalCapacity(16);
}
pub fn deinit(device: *Device) void {
const it = s_fbufs.iterator();
{
while (it.next()) |entry| {
vkd.destroyFramebuffer(device.dev, entry.handle, null);
entry.handle = null;
}
pub fn deinit(device: *const Device) void {
for (s_fbufs.values()) |*entry| {
device.dispatch.destroyFramebuffer(device.handle, entry.handle, null);
}
s_fbufs.deinit();
@ -71,6 +66,16 @@ pub fn getOrAdd(attachments: []*const Image, width: i32, height: i32) !*Framebuf
return result.value_ptr;
}
pub fn remove(device: *const Device, view: vk.ImageView) void {
var iter = s_fbufs.iterator();
while (iter.next()) |entry| {
if (containsView(entry.key_ptr, view)) {
Framebuffer.deinit(entry.value_ptr.handle, device);
_ = s_fbufs.swapRemove(entry.key_ptr.*);
}
}
}
fn containsView(key: *const Key, view: vk.ImageView) bool {
for (key.attachments) |attachment| {
if (attachment == view) {
@ -81,8 +86,8 @@ fn containsView(key: *const Key, view: vk.ImageView) bool {
return false;
}
const Framebuffer = struct {
pub fn init(device: *Device, attachments: []const vk.ImageView, formats: []const vk.Format, width: i32, height: i32) !Self {
pub const Framebuffer = struct {
pub fn init(device: *Device, attachments: []const vk.ImageView, formats: []const vk.Format, width: i32, height: i32) !vk.Framebuffer {
const pass_desc: renderpass.Description = .{
.src_access_mask = .shader_read_bit,
.dst_access_mask = .color_attachment_write_bit,
@ -100,7 +105,7 @@ const Framebuffer = struct {
}
const pass = try renderpass.get(&pass_desc);
vkd.createFramebuffer(device.dev, .{
return try device.dispatch.createFramebuffer(device.handle, .{
.s_type = .framebuffer_create_info,
.render_pass = pass,
.attachment_count = attachments.len,
@ -111,7 +116,7 @@ const Framebuffer = struct {
}, null);
}
pub fn deinit(self: *Self, device: *Device) !void {
vkd.destroyFramebuffer(device.dev, self.handle, null);
pub fn deinit(framebuffer: vk.Framebuffer, device: *const Device) void {
device.dispatch.destroyFramebuffer(device.handle, framebuffer, null);
}
};

View File

@ -1,17 +1,16 @@
const vk = @import("vulkan");
const vma = @import("vma");
const vkd = @import("device.zig").DeviceDispatch;
const Device = @import("device.zig").Device;
const Command = @import("Command.zig");
const SubmitId = @import("Swapchain.zig").SubmitId;
const SubmitId = @import("swapchain.zig").SubmitId;
const std = @import("std");
const queues = @import("queues.zig");
const settings = @import("settings.zig");
const memory = @import("memory.zig");
const Swapchain = @import("Swapchain.zig");
const Swapchain = @import("swapchain.zig").Swapchain;
const Image = packed struct {
pub const Image = packed struct {
const Self = @This();
state: ImageState,
@ -35,33 +34,31 @@ const Image = packed struct {
return self;
}
pub fn release(self: *Self) !void {
pub fn release(self: *const Self) !void {
self.getSubmit();
}
pub fn deinit(self: *Self) void {
memory.imageDel(self);
pub fn deinit(self: *const Self, device: *const Device) void {
memory.imageDel(self, device);
}
/// import vk.Image handle into an existing Image object.
pub fn import(self: *Image, device: *Device, info: *const vk.ImageCreateInfo, handle: vk.Image) !void {
pub fn import(self: *Image, device: *const Device, info: *const vk.ImageCreateInfo, handle: vk.Image) !void {
self.handle = handle;
self.allocation = null;
self.view = null;
self.image_type = info.image_type;
self.format = info.format;
self.state.layout = info.initial_layout;
self.usage = info.usage;
self.width = info.extent.width;
self.height = info.extent.height;
self.depth = info.extent.depth;
self.mip_levels = info.mip_levels;
self.array_layers = info.array_layers;
self.width = @intCast(u16, info.extent.width);
self.height = @intCast(u16, info.extent.height);
self.depth = @intCast(u12, info.extent.depth);
self.mip_levels = @intCast(u8, info.mip_levels);
self.array_layers = @intCast(u8, info.array_layers);
self.imported = true;
const view_info = try infoToViewInfo(info);
var view_info = try infoToViewInfo(info);
view_info.image = self.handle;
try vkd.createImageView(device.dev, &view_info, null, &self.view);
self.view = try device.dispatch.createImageView(device.handle, &view_info, null);
errdefer memory.imageDel(self);
}
@ -83,13 +80,13 @@ const Image = packed struct {
}
};
const ImageState = struct {
const ImageState = packed struct {
owner: queues.QueueId,
cmd_id: i32,
stage: vk.PipelineStageFlags,
access: vk.AccessFlags,
layout: vk.ImageLayout,
substates: []SubImageState,
substates: [*]SubImageState,
};
const ImageSet = struct {
@ -128,7 +125,7 @@ const ImageSet = struct {
}
};
const SubImageState = struct {
const SubImageState = packed struct {
stage: vk.PipelineStageFlags,
access: vk.AccessFlags,
layout: vk.ImageLayout,
@ -136,12 +133,15 @@ const SubImageState = struct {
pub fn infoToViewInfo(info: *const vk.ImageCreateInfo) !vk.ImageViewCreateInfo {
// anything but transfer usage needs a view
const viewless: vk.ImageUsageFlags = .transfer_src_bit | .transfer_dst_bit;
if (info.usage & (~viewless)) {
const viewless: vk.ImageUsageFlags = .{ .transfer_src_bit = true, .transfer_dst_bit = true };
if (!info.usage.contains(viewless)) {
return vk.ImageViewCreateInfo{
.s_type = .image_view_create_info,
.flags = .{},
.format = info.format,
.view_type = infoToViewType(info),
.image = undefined,
.components = undefined,
.subresource_range = .{
.aspect_mask = infoToAspects(info),
.base_mip_level = 0,
@ -159,18 +159,18 @@ pub fn infoToViewInfo(info: *const vk.ImageCreateInfo) !vk.ImageViewCreateInfo {
pub fn infoToViewType(info: *const vk.ImageCreateInfo) vk.ImageViewType {
return switch (info.image_type) {
.@"1d" => {
if (info.array_layers <= 1) .@"1d" else .@"1d_array";
return if (info.array_layers <= 1) .@"1d" else .@"1d_array";
},
.@"3d" => {
.@"3d";
return .@"3d";
},
else => {
if (info.array_layers <= 1) {
.@"2d";
return .@"2d";
} else if (info.array_layers == 6) {
.cube;
return .cube;
} else {
.@"2d_array";
return .@"2d_array";
}
},
};
@ -178,9 +178,9 @@ pub fn infoToViewType(info: *const vk.ImageCreateInfo) vk.ImageViewType {
pub fn infoToAspects(info: *const vk.ImageCreateInfo) vk.ImageAspectFlags {
return switch (info.format) {
.d16_unorm, .x8_d24_unorm_pack32, .d32_sfloat => .depth_bit,
.d16_unorm_s8_uint, .d24_unorm_s8_uint, .d32_sfloat_s8_uint => .depth_bit | .stencil_bit,
.s8_uint => .stencil_bit,
else => .color_bit,
.d16_unorm, .x8_d24_unorm_pack32, .d32_sfloat => .{ .depth_bit = true },
.d16_unorm_s8_uint, .d24_unorm_s8_uint, .d32_sfloat_s8_uint => .{ .depth_bit = true, .stencil_bit = true },
.s8_uint => .{ .stencil_bit = true },
else => .{ .color_bit = true },
};
}

View File

@ -2,23 +2,26 @@ const std = @import("std");
const vk = @import("vulkan");
const glfw = @import("glfw");
const vkb = @import("base.zig").BaseDispatch;
const settings = @import("settings.zig");
const Base = @import("Base.zig");
const Extensions = @import("Extensions.zig");
const layers = @import("layers.zig").layers;
const enabled_layers = @import("layers.zig").enabled;
const DebugMessenger = @import("DebugMessenger.zig");
// TODO temp allocator
const allocator = std.heap.GeneralPurposeAllocator;
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
const InstanceDispatch = vk.InstanceWrapper(.{
.destroyInstance = true,
.createDevice = true,
.destroySurfaceKHR = true,
.enumeratePhysicalDevices = true,
.getPhysicalDeviceProperties = true,
.createDebugUtilsMessengerEXT = true,
.destroyDebugUtilsMessengerEXT = true,
.getPhysicalDeviceProperties2 = true,
.getPhysicalDeviceFeatures2 = true,
.enumerateDeviceExtensionProperties = true,
.getPhysicalDeviceSurfaceFormatsKHR = true,
.getPhysicalDeviceSurfacePresentModesKHR = true,
@ -28,27 +31,26 @@ const InstanceDispatch = vk.InstanceWrapper(.{
.getPhysicalDeviceMemoryProperties = true,
.getDeviceProcAddr = true,
});
const vki = InstanceDispatch;
pub const Instance = struct {
const Self = @This();
vkInst: vk.Instance,
messenger: DebugMessenger,
dispatch: InstanceDispatch,
handle: vk.Instance,
messenger: DebugMessenger = undefined,
layers: [][*:0]const u8,
extensions: [][*:0]const u8,
layers: std.ArrayList([*:0]const u8),
extensions: std.ArrayList([*:0]const u8),
pub fn init(self: Self) !void {
pub fn init() !Self {
// TODO: do we absolutely need volk rn? Don't think so? volk init here
const avail_layers = try getAvailableLayers();
var avail_layers = try getAvailableLayers();
defer avail_layers.deinit();
const avail_exts = try getAvailableExtensions();
var avail_exts = try getAvailableExtensions();
defer avail_exts.deinit();
self.layers = try getLayers(&avail_layers);
self.extensions = try getExtensions(&avail_exts);
const layers_to_load = try getLayers(&avail_layers);
const extensions = try getExtensions(&avail_exts);
const app_info = vk.ApplicationInfo{
.p_application_name = settings.app_name,
@ -58,23 +60,30 @@ pub const Instance = struct {
.api_version = vk.API_VERSION_1_2,
};
self.vkInst = try vkb.createInstance(&.{
const handle = try Base.dispatch.createInstance(&.{
.flags = .{},
.p_application_info = &app_info,
.enabled_layer_count = @intCast(u32, self.layers.len),
.pp_enabled_layer_names = self.layers.ptr,
.enabled_extension_count = @intCast(u32, self.extensions.len),
.pp_enabled_extension_names = self.extensions.items,
.enabled_layer_count = @intCast(u32, layers_to_load.items.len),
.pp_enabled_layer_names = layers_to_load.items.ptr,
.enabled_extension_count = @intCast(u32, extensions.items.len),
.pp_enabled_extension_names = extensions.items.ptr,
}, null);
errdefer self.vki.destroyInstance(self.vkInst, null);
self.messenger = DebugMessenger.init(&self);
const dispatch = try InstanceDispatch.load(handle, Base.vk_proc);
errdefer dispatch.destroyInstance(handle, null);
var self = Self{
.dispatch = dispatch,
.layers = layers_to_load,
.extensions = extensions,
.handle = handle,
};
self.messenger = try DebugMessenger.init(&self);
return self;
}
pub fn deinit(self: Self) void {
self.vki.destroyInstance(self.vkInst, null);
self.dispatch.destroyInstance(self.handle, null);
self.layers.deinit();
self.extensions.deinit();
@ -82,32 +91,34 @@ pub const Instance = struct {
}
/// list all available layers
fn getAvailableLayers(self: Self) !std.StringArrayHashMap {
fn getAvailableLayers() !std.StringArrayHashMap(void) {
var count: u32 = undefined;
_ = try self.vki.enumerateInstanceLayerProperties(&count, null);
const list = try allocator.alloc(self.vki.LayerProperties, count);
_ = try Base.dispatch.enumerateInstanceLayerProperties(&count, null);
const list = try allocator.alloc(vk.LayerProperties, count);
defer allocator.free(list);
_ = try self.vki.enumerateInstanceLayerProperties(&count, list.ptr);
_ = try Base.dispatch.enumerateInstanceLayerProperties(&count, list.ptr);
const hash_map = std.StringArrayHashMap(void).init(allocator);
var hash_map = std.StringArrayHashMap(void).init(allocator);
try hash_map.ensureTotalCapacity(count);
// TODO log
std.debug.print("{} available instance layers", .{count});
for (list) |layer| {
std.debug.print("{s}", layer.layer_name);
hash_map.putAssumeCapacity(layer);
const layer_name = std.mem.sliceTo(&layer.layer_name, 0);
std.debug.print("{s}", .{layer_name});
hash_map.putAssumeCapacity(layer_name, {});
}
return hash_map;
}
fn getLayers(avail_layers: *std.StringArrayHashMap) !std.ArrayList([*:0]const u8) {
const list = try std.ArrayList([*:0]const u8).init(allocator);
fn getLayers(avail_layers: *std.StringArrayHashMap(void)) !std.ArrayList([*:0]const u8) {
var list = std.ArrayList([*:0]const u8).init(allocator);
for (layers.enabled) |ext| {
if (avail_layers.contains(ext)) {
list.append(ext);
for (enabled_layers) |layer| {
const layer_name = std.mem.sliceTo(layer, 0);
if (avail_layers.contains(layer_name)) {
try list.append(layer);
}
}
@ -115,40 +126,43 @@ pub const Instance = struct {
}
/// list all available extensions
fn getAvailableExtensions(self: Self) !std.StringArrayHashMap {
fn getAvailableExtensions() !std.StringArrayHashMap(void) {
var count: u32 = undefined;
_ = try self.vki.enumerateInstanceExtensionProperties(&count, null);
const list = try allocator.alloc(self.vki.ExtensionProperties, count);
_ = try Base.dispatch.enumerateInstanceExtensionProperties(null, &count, null);
const list = try allocator.alloc(vk.ExtensionProperties, count);
defer allocator.free(list);
_ = try self.vki.enumerateInstanceExtensionProperties(&count, list.ptr);
_ = try Base.dispatch.enumerateInstanceExtensionProperties(null, &count, list.ptr);
const hash_map = std.StringArrayHashMap(void).init(allocator);
var hash_map = std.StringArrayHashMap(void).init(allocator);
try hash_map.ensureTotalCapacity(count);
// TODO log
std.debug.print("{} available instance extensions", count);
std.debug.print("{} available instance extensions", .{count});
for (list) |ext| {
std.debug.print("{s}", ext.extension_name);
hash_map.putAssumeCapacity(ext);
const extension_name = std.mem.sliceTo(&ext.extension_name, 0);
std.debug.print("{s}", .{extension_name});
hash_map.putAssumeCapacity(extension_name, {});
}
return hash_map;
}
fn getExtensions(avail_exts: *std.StringArrayHashMap) !std.ArrayList([][*:0]const u8) {
const list = try std.ArrayList([*:0]const u8).init(allocator);
fn getExtensions(avail_exts: *std.StringArrayHashMap(void)) !std.ArrayList([*:0]const u8) {
var list = std.ArrayList([*:0]const u8).init(allocator);
for (try glfw.getRequiredInstanceExtensions()) |ext| {
if (avail_exts.contains(ext)) {
list.append(ext);
const extension_name = std.mem.sliceTo(ext, 0);
if (avail_exts.contains(extension_name)) {
try list.append(extension_name);
}
}
const instExts = Extensions.toList(Extensions.Instance);
defer instExts.deinit();
const instExts = Extensions.Instance.toArray(allocator);
defer allocator.free(instExts);
for (instExts) |ext| {
if (avail_exts.contains(ext)) {
list.append(ext);
const extension_name = std.mem.sliceTo(ext, 0);
if (avail_exts.contains(extension_name)) {
try list.append(ext);
}
}

View File

@ -1,6 +1,6 @@
const settings = @import("settings.zig");
pub const layers = [_][*:0]const u8{
pub const enabled = [_][*:0]const u8{
if (settings.khronos_layer_on) "KHRONOS_validation" else void,
if (settings.assist_layer_on) "LUNARG_assistant_layer" else void,
};

View File

@ -1,18 +1,40 @@
const std = @import("std");
const vk = @import("vulkan");
const dev = @import("device.zig");
const vkd = dev.DeviceDispatch;
const Device = dev.Device;
const framebuffer = @import("framebuffer.zig");
const Device = @import("device.zig").Device;
const Instance = @import("instance.zig").Instance;
const SubmitId = @import("swapchain.zig").SubmitId;
const settings = @import("settings.zig");
const queues = @import("queues.zig");
const vma = @import("vma");
const profiler = @import("/common/profiler.zig");
const profiler = @import("../../common/profiler.zig");
const Image = @import("image.zig").Image;
// TODO
const allocator = undefined;
var s_allocator: Allocator = undefined;
/// Initializes the module-level VMA-backed allocator singleton.
/// Must be called once before any other function in this module.
pub fn init(instance: *const Instance, device: *const Device) !void {
    s_allocator = try Allocator.init(instance, device);
}
/// Tears down the allocator singleton. Drains pending releasables
/// best-effort first; errors are swallowed because deallocation must succeed.
pub fn deinit(device: *const Device) void {
    finalize(device) catch {};
    s_allocator.deinit();
}
/// Waits for the device to go idle, then destroys every queued releasable.
/// NOTE(review): s_allocator.releasables is never populated in this file —
/// confirm it is initialized (or at least zero-length) elsewhere before this runs.
pub fn finalize(device: *const Device) !void {
    try device.waitIdle();
    s_allocator.mutex.lock();
    defer s_allocator.mutex.unlock();
    for (s_allocator.releasables) |releasable| {
        //Command.wait(releasable.submit_id);
        releasable.deinit(device, s_allocator);
    }
}
const pm_imgnew = profiler.ProfileMark.init("memory.imageNew");
pub fn imageNew(img: *Image, info: *const vk.ImageCreateInfo, mem_usage: vma.MemUsage) !void {
pub fn imageNew(img: *Image, info: *const vk.ImageCreateInfo, mem_usage: vma.MemoryUsage) !void {
try pm_imgnew.begin();
defer pm_imgnew.end();
@ -30,8 +52,10 @@ pub fn imageNew(img: *Image, info: *const vk.ImageCreateInfo, mem_usage: vma.Mem
img.array_layers = info.array_layers;
img.imported = false;
try vma.createImage(allocator.handle, info, &.{
.flags = .within_budget_bit,
try vma.createImage(s_allocator.handle, info, &.{
.flags = .{
.withinBudget = true,
},
.usage = mem_usage,
.pool = getTexturePool(info.usage, mem_usage),
}, &img.handle, &img.allocation, null);
@ -45,24 +69,19 @@ pub fn imageNew(img: *Image, info: *const vk.ImageCreateInfo, mem_usage: vma.Mem
}
const pm_imagedel = profiler.ProfileMark.init("memory.imageDel");
pub fn imageDel(img: *Image, device: *Device) void {
try pm_imagedel.start();
pub fn imageDel(img: *const Image, device: *const Device) void {
pm_imagedel.begin() catch {};
defer pm_imagedel.end();
if (img.view != null) {
vkd.destroyImageView(device.dev, img.view, null);
}
if (img.allocation != null) {
vma.destroyImage(allocator, img.handle, img.allocation);
}
device.dispatch.destroyImageView(device.handle, img.view, null);
vma.Allocator.destroyImage(s_allocator.handle, img.handle, img.allocation);
}
fn getTexturePool(usage: vk.ImageUsageFlags, mem_usage: vma.MemUsage) ?vma.Pool {
fn getTexturePool(usage: vk.ImageUsageFlags, mem_usage: vma.MemoryUsage) ?vma.Pool {
const pool = switch (mem_usage) {
.gpu_only => {
if (usage & allocator.device_texture_pool.image_usage) {
allocator.device_texture_pool.handle;
if (usage & s_allocator.device_texture_pool.image_usage) {
s_allocator.device_texture_pool.handle;
} else {
null;
}
@ -75,3 +94,258 @@ fn getTexturePool(usage: vk.ImageUsageFlags, mem_usage: vma.MemUsage) ?vma.Pool
std.debug.assert(pool != null || (usage & attachment_usage));
return pool;
}
/// A VMA memory pool dedicated to one resource category: either buffers or
/// images (exactly one of buffer_usage / image_usage is set; asserted in init).
const Pool = struct {
    const Self = @This();
    handle: vma.Pool,
    size: vk.DeviceSize,
    buffer_usage: ?vk.BufferUsageFlags = null,
    image_usage: ?vk.ImageUsageFlags = null,
    mem_usage: vma.MemoryUsage,
    queue_usage: queues.QueueFlags,
    /// Creates a pool of `size` bytes for the given usage, shared across the
    /// queue families selected by `queue_usage`. Returns
    /// error.InvalidUsageFlags when both usages are null (the assert below
    /// catches that earlier in safe builds).
    pub fn init(handle: vma.Allocator, size: vk.DeviceSize, buffer_usage: ?vk.BufferUsageFlags, image_usage: ?vk.ImageUsageFlags, mem_usage: vma.MemoryUsage, queue_usage: queues.QueueFlags) !Self {
        std.debug.assert(size > 0);
        std.debug.assert((buffer_usage != null and image_usage == null) or (buffer_usage == null and image_usage != null));
        // Collect the queue family indices this pool's resources may touch.
        var queue_family_count: usize = 0;
        var queue_families: [queues.QueueId.count]u32 = undefined;
        if (queue_usage.graphics_bit) {
            queue_families[queue_family_count] = queues.get(queues.QueueId.graphics).family;
            queue_family_count += 1;
        }
        if (queue_usage.compute_bit) {
            queue_families[queue_family_count] = queues.get(queues.QueueId.compute).family;
            queue_family_count += 1;
        }
        if (queue_usage.transfer_bit) {
            queue_families[queue_family_count] = queues.get(queues.QueueId.transfer).family;
            queue_family_count += 1;
        }
        if (queue_usage.present_bit) {
            queue_families[queue_family_count] = queues.get(queues.QueueId.present).family;
            queue_family_count += 1;
        }
        const size_u32 = @intCast(u32, size);
        if (buffer_usage) |usage| {
            // Pick a memory type using a representative buffer description,
            // then build the pool on that memory type.
            const mem_type_index = try handle.findMemoryTypeIndexForBufferInfo(.{
                .s_type = .buffer_create_info,
                .flags = .{},
                .size = size_u32,
                .usage = usage,
                .sharing_mode = .exclusive,
                .queue_family_index_count = @intCast(u32, queue_family_count),
                .p_queue_family_indices = &queue_families,
            }, .{
                .flags = .{
                    .withinBudget = true,
                },
                .usage = mem_usage,
            });
            const pool = try handle.createPool(.{
                .memoryTypeIndex = mem_type_index,
                // NOTE(review): the image branch below uses resource_sets
                // without the -1 — confirm which value is intended.
                .frameInUseCount = settings.resource_sets - 1,
            });
            return Self{
                .size = size,
                .buffer_usage = usage,
                .mem_usage = mem_usage,
                .queue_usage = queue_usage,
                .handle = pool,
            };
        } else if (image_usage) |usage| {
            // Pick a memory type using a representative square 2D image
            // (size x size, full mip chain), then build the pool on it.
            const mem_type_index = try handle.findMemoryTypeIndexForImageInfo(.{
                .s_type = .image_create_info,
                .flags = .{},
                .image_type = .@"2d",
                .format = .r16g16b16a16_sfloat,
                .extent = .{
                    .width = size_u32,
                    .height = size_u32,
                    .depth = 1,
                },
                .mip_levels = 1 + std.math.log2(size_u32),
                .array_layers = 1,
                .samples = .{
                    .@"1_bit" = true,
                },
                .tiling = .optimal,
                .usage = usage,
                .sharing_mode = .exclusive,
                .queue_family_index_count = @intCast(u32, queue_family_count),
                .p_queue_family_indices = &queue_families,
                .initial_layout = .@"undefined",
            }, .{
                .flags = .{
                    .withinBudget = true,
                },
                .usage = mem_usage,
            });
            const pool = try handle.createPool(.{
                .memoryTypeIndex = mem_type_index,
                .frameInUseCount = settings.resource_sets,
            });
            return Self{
                .size = size,
                .image_usage = usage,
                .mem_usage = mem_usage,
                .queue_usage = queue_usage,
                .handle = pool,
            };
        } else {
            return error.InvalidUsageFlags;
        }
    }
    /// Destroys the VMA pool. `handle` must be the allocator it was created on.
    pub fn deinit(self: *const Self, handle: vma.Allocator) void {
        handle.destroyPool(self.handle);
    }
};
/// Category tag for deferred-release GPU objects (see Releasable).
const ReleasableType = enum {
    buffer,
    image,
    image_view,
    attachment, // view used as an attachment
};
/// Per-category payload: the handles that must be destroyed once the GPU is
/// done with the object.
const ReleasableUnion = union(ReleasableType) {
    buffer: struct {
        handle: vk.Buffer,
        allocation: vma.Allocation,
    },
    image: struct {
        handle: vk.Image,
        allocation: vma.Allocation,
        view: vk.ImageView,
    },
    image_view: vk.ImageView,
    attachment: vk.ImageView,
};
/// A GPU object scheduled for destruction once the submission identified by
/// `submit_id` has retired.
const Releasable = struct {
    const Self = @This();
    submit_id: SubmitId,
    object: ReleasableUnion,
    /// Destroys the wrapped object. Caller must ensure the GPU has finished
    /// with it (see finalize()).
    pub fn deinit(self: *const Self, device: *const Device, allocator: Allocator) void {
        switch (self.object) {
            .buffer => |buffer| {
                allocator.handle.destroyBuffer(buffer.handle, buffer.allocation);
            },
            .image => |image| {
                // View first, then the image plus its VMA allocation.
                device.dispatch.destroyImageView(device.handle, image.view, null);
                allocator.handle.destroyImage(image.handle, image.allocation);
            },
            .image_view => |image_view| {
                device.dispatch.destroyImageView(device.handle, image_view, null);
            },
            .attachment => |image_view| {
                // Attachment views are also registered in the framebuffer
                // cache, so evict them there before destroying.
                framebuffer.remove(device, image_view);
                device.dispatch.destroyImageView(device.handle, image_view, null);
            },
        }
    }
};
/// Owns the VMA allocator plus the per-category memory pools used by this
/// module. Guarded by `mutex` for the deferred-release list.
const Allocator = struct {
    const Self = @This();
    mutex: std.Thread.Mutex,
    handle: vma.Allocator,
    staging_pool: Pool,
    device_buffer_pool: Pool,
    dynamic_buffer_pool: Pool,
    device_texture_pool: Pool,
    // NOTE(review): never allocated or appended to in this file, but
    // finalize() iterates it — confirm it is populated (or zero-length)
    // before first use.
    releasables: []Releasable = undefined,
    /// Creates the VMA allocator and the four pools:
    /// staging (CPU-only transfer), device buffers (GPU-only),
    /// dynamic buffers (CPU-to-GPU), and device textures (GPU-only).
    pub fn init(instance: *const Instance, device: *const Device) !Self {
        const handle = try vma.Allocator.create(.{
            .vulkanApiVersion = vk.API_VERSION_1_3,
            .flags = .{
                .memoryBudgetEXT = true,
                .externallySynchronized = true,
            },
            .instance = instance.handle,
            .physicalDevice = device.physical_device,
            .device = device.handle,
            .pAllocationCallbacks = null,
            .frameInUseCount = settings.resource_sets,
        });
        errdefer handle.destroy();
        const staging_pool = try Pool.init(handle, 1 << 20, vk.BufferUsageFlags{
            .transfer_src_bit = true,
            .transfer_dst_bit = true,
        }, null, .cpuOnly, .{
            .transfer_bit = true,
            .graphics_bit = true,
            .compute_bit = true,
        });
        errdefer staging_pool.deinit(handle);
        const device_buffer_pool = try Pool.init(handle, 1 << 20, vk.BufferUsageFlags{
            .transfer_src_bit = true,
            .transfer_dst_bit = true,
            .vertex_buffer_bit = true,
            .index_buffer_bit = true,
            .uniform_buffer_bit = true,
            .storage_buffer_bit = true,
        }, null, .gpuOnly, .{
            .transfer_bit = true,
            .graphics_bit = true,
            .compute_bit = true,
        });
        errdefer device_buffer_pool.deinit(handle);
        const dynamic_buffer_pool = try Pool.init(handle, 1 << 20, vk.BufferUsageFlags{
            .vertex_buffer_bit = true,
            .index_buffer_bit = true,
            .uniform_buffer_bit = true,
        }, null, .cpuToGpu, .{
            .graphics_bit = true,
            .compute_bit = true,
        });
        errdefer dynamic_buffer_pool.deinit(handle);
        // Texture pool: size here is the representative image edge length,
        // not a byte count (see Pool.init's image branch).
        const device_texture_pool = try Pool.init(handle, 1024, null, vk.ImageUsageFlags{
            .transfer_dst_bit = true,
            .transfer_src_bit = true,
            .sampled_bit = true,
            .storage_bit = true,
        }, .gpuOnly, .{
            .transfer_bit = true,
            .graphics_bit = true,
            .compute_bit = true,
        });
        errdefer device_texture_pool.deinit(handle);
        return Self{
            .mutex = std.Thread.Mutex{},
            .handle = handle,
            .staging_pool = staging_pool,
            .device_buffer_pool = device_buffer_pool,
            .dynamic_buffer_pool = dynamic_buffer_pool,
            .device_texture_pool = device_texture_pool,
        };
    }
    /// Destroys all pools, then the VMA allocator itself.
    pub fn deinit(self: *Self) void {
        self.staging_pool.deinit(self.handle);
        self.device_texture_pool.deinit(self.handle);
        self.device_buffer_pool.deinit(self.handle);
        self.dynamic_buffer_pool.deinit(self.handle);
        self.handle.destroy();
    }
};

View File

@ -3,87 +3,94 @@ const assert = @import("std").debug.assert;
const vk = @import("vulkan");
const dev = @import("device.zig");
const vkd = dev.DeviceDispatch;
const settings = @import("settings.zig");
const Instance = @import("instance.zig").Instance;
const Device = @import("device.zig").Device;
const Window = @import("display.zig").Window;
const Renderer = @import("Renderer.zig");
const Command = @import("Command.zig");
const sync = @import("sync.zig");
// TODO memory
const gpa = std.heap.GeneralPurposeAllocator(.{}){};
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
const present_stages: vk.PipelineStageFlags =
.all_commands_bit |
.top_of_pipe_bit |
.bottom_of_pipe_bit;
const present_access: vk.PipelineAccessFlags =
.memory_read_bit |
.memory_write_bit;
const present_stages: vk.PipelineStageFlags = .{ .all_commands_bit = true, .top_of_pipe_bit = true, .bottom_of_pipe_bit = true };
const present_access: vk.AccessFlags = .{
.memory_read_bit = true,
.memory_write_bit = true,
};
const graphics_stages: vk.PipelineStageFlags =
.all_commands_bit |
.all_graphics_bit |
.top_of_pipe_bit |
.bottom_of_pipe_bit |
.draw_indirect_bit |
.vertex_input_bit |
.vertex_shader_bit |
.tessellation_control_shader_bit |
.tessellation_evaluation_shader_bit |
.geometry_shader_bit |
.fragment_shader_bit |
.early_fragment_tests_bit |
.late_fragment_tests_bit |
.color_attachment_output_bit |
.conditional_rendering_bit_ext |
.task_shader_bit_nv |
.mesh_shader_bit_nv;
const graphics_access: vk.AccessFlags =
.memory_read_bit |
.memory_write_bit |
.indirect_command_read_bit |
.index_read_bit |
.vertex_attribute_read_bit |
.uniform_read_bit |
.input_attachment_read_bit |
.shader_read_bit |
.shader_write_bit |
.color_attachment_read_bit |
.color_attachment_write_bit |
.conditional_rendering_read_bit_ext |
.acceleration_structure_read_bit_khr |
.acceleration_structure_write_bit_khr;
const graphics_stages: vk.PipelineStageFlags = .{
.all_commands_bit = true,
.all_graphics_bit = true,
.top_of_pipe_bit = true,
.bottom_of_pipe_bit = true,
.draw_indirect_bit = true,
.vertex_input_bit = true,
.vertex_shader_bit = true,
.tessellation_control_shader_bit = true,
.tessellation_evaluation_shader_bit = true,
.geometry_shader_bit = true,
.fragment_shader_bit = true,
.early_fragment_tests_bit = true,
.late_fragment_tests_bit = true,
.color_attachment_output_bit = true,
.conditional_rendering_bit_ext = true,
.task_shader_bit_nv = true,
.mesh_shader_bit_nv = true,
};
const graphics_access: vk.AccessFlags = .{
.memory_read_bit = true,
.memory_write_bit = true,
.indirect_command_read_bit = true,
.index_read_bit = true,
.vertex_attribute_read_bit = true,
.uniform_read_bit = true,
.input_attachment_read_bit = true,
.shader_read_bit = true,
.shader_write_bit = true,
.color_attachment_read_bit = true,
.color_attachment_write_bit = true,
.conditional_rendering_read_bit_ext = true,
.acceleration_structure_read_bit_khr = true,
.acceleration_structure_write_bit_khr = true,
};
const compute_stages: vk.PipelineStageFlags =
.all_commands_bit |
.top_of_pipe_bit |
.bottom_of_pipe_bit |
.compute_shader_bit |
.acceleration_structure_build_bit_khr |
.ray_tracing_shader_bit_khr;
const compute_access: vk.AccessFlags =
.memory_read_bit |
.memory_write_bit |
.indirect_command_read_bit |
.uniform_read_bit |
.shader_read_bit |
.shader_write_bit |
.acceleration_structure_read_bit_khr |
.acceleration_structure_write_bit_khr;
const compute_stages: vk.PipelineStageFlags = .{
.all_commands_bit = true,
.top_of_pipe_bit = true,
.bottom_of_pipe_bit = true,
.compute_shader_bit = true,
.acceleration_structure_build_bit_khr = true,
.ray_tracing_shader_bit_khr = true,
};
const compute_access: vk.AccessFlags = .{
.memory_read_bit = true,
.memory_write_bit = true,
.indirect_command_read_bit = true,
.uniform_read_bit = true,
.shader_read_bit = true,
.shader_write_bit = true,
.acceleration_structure_read_bit_khr = true,
.acceleration_structure_write_bit_khr = true,
};
const transfer_stages: vk.PipelineStageFlags =
.all_commands_bit |
.top_of_pipe_bit |
.bottom_of_pipe_bit |
.transfer_bit |
.host_bit;
const transfer_access: vk.AccessFlags =
.memory_read_bit |
.memory_write_bit |
.host_read_bit |
.host_write_bit |
.transfer_read_bit |
.transfer_write_bit;
const transfer_stages: vk.PipelineStageFlags = .{
.all_commands_bit = true,
.top_of_pipe_bit = true,
.bottom_of_pipe_bit = true,
.transfer_bit = true,
.host_bit = true,
};
const transfer_access: vk.AccessFlags = .{
.memory_read_bit = true,
.memory_write_bit = true,
.host_read_bit = true,
.host_write_bit = true,
.transfer_read_bit = true,
.transfer_write_bit = true,
};
pub const QueueId = enum(u4) {
graphics,
@ -94,84 +101,94 @@ pub const QueueId = enum(u4) {
pub const count = @typeInfo(@This()).Enum.fields.len;
};
const queues: [QueueId.count]Queue = undefined;
pub const QueueFlags = packed struct {
graphics_bit: bool = false,
compute_bit: bool = false,
transfer_bit: bool = false,
present_bit: bool = false,
pub fn init(device: *dev.Device, window: *dev.Window) !void {
assert(device.phdev);
assert(window.surface);
__reserved_bits_04_31: u28 = 0,
const support = try QueueSupport.init(device.phdev, window.surface);
pub usingnamespace vk.FlagsMixin(@This(), vk.Flags);
};
var s_queues: [QueueId.count]Queue = undefined;
pub fn init(instance: *const Instance, device: *const Device, window: *const Window) !void {
const support = try Support.init(instance, device.physical_device, window);
defer support.deinit();
for (queues) |queue, i| {
queue.init(&support, i);
for (s_queues) |*queue, i| {
queue.* = try Queue.init(device, &support, i);
}
}
pub fn deinit(device: dev.Device) void {
device.waitIdle();
pub fn deinit(device: *const Device) void {
device.waitIdle() catch {};
for (queues) |queue| {
queue.deinit();
for (s_queues) |queue| {
queue.deinit(device);
}
}
pub fn get(id: QueueId) *Queue {
assert(id < queues.len);
return &queues[id];
return &s_queues[@enumToInt(id)];
}
const QueueSupport = struct {
pub const Support = struct {
const Self = @This();
families: [QueueId.count]?i32,
indices: [QueueId.count]i32,
properties: []vk.QueueFamilyProperties,
families: [QueueId.count]?u32 = undefined,
indices: [QueueId.count]QueueId = undefined,
properties: []vk.QueueFamilyProperties = undefined,
pub fn init(device: dev.Device, window: dev.Window) !void {
const self = Self{};
pub fn init(instance: *const Instance, physical_device: vk.PhysicalDevice, window: *const Window) !Self {
var self = Self{};
const count: i32 = undefined;
vkd.physicalDeviceQueueFamilyProperties(device.phdev, &count, null);
self.properties = allocator.alloc(vk.QueueFamilyProperties, count);
vkd.physicalDeviceQueueFamilyProperties(device.phdev, &count, self.properties.ptr);
var count: u32 = undefined;
instance.dispatch.getPhysicalDeviceQueueFamilyProperties(physical_device, &count, null);
self.properties = try allocator.alloc(vk.QueueFamilyProperties, count);
instance.dispatch.getPhysicalDeviceQueueFamilyProperties(physical_device, &count, self.properties.ptr);
self.families[QueueId.graphics] = selectGfxFamily(self.properties);
self.families[QueueId.compute] = selectCompFamily(self.properties);
self.families[QueueId.transfer] = selectXferFamily(self.properties);
self.families[QueueId.present] = selectPresFamily(device.phdev, window.surface, self.properties);
self.families[@enumToInt(QueueId.graphics)] = selectGfxFamily(self.properties);
self.families[@enumToInt(QueueId.compute)] = selectCompFamily(self.properties);
self.families[@enumToInt(QueueId.transfer)] = selectXferFamily(self.properties);
self.families[@enumToInt(QueueId.present)] = try selectPresFamily(instance, physical_device, window.surface, self.properties);
// TODO: don't understand the purpose of 'indices' yet...
const choice_counts = allocator.alloc(i32, count);
const choice_counts = try allocator.alloc(u32, count);
defer allocator.free(choice_counts);
for (self.families) |family| {
const choice_count = choice_counts[family];
self.indices = choice_count;
choice_counts[family] += 1;
for (self.families) |family, i| {
if (family) |f| {
const choice_count = choice_counts[@intCast(usize, f)];
self.indices[i] = @intToEnum(QueueId, choice_count);
// probably wrong
choice_counts[@intCast(usize, f)] = choice_count + 1;
}
}
return self;
}
pub fn deinit(self: *Self) void {
pub fn deinit(self: *const Self) void {
allocator.free(self.properties);
}
fn selectGfxFamily(families: []vk.QueueFamilyProperties) i32 {
var index: ?i32 = null;
var score: u32 = 0;
fn selectGfxFamily(families: []vk.QueueFamilyProperties) ?u32 {
var index: ?u32 = null;
var score: i32 = 0;
for (families) |family, i| {
if (family.queue_count == 0) {
continue;
}
if (family.queue_flags & .graphics_bit) {
var new_score: u32 = 0;
new_score += if (family.queue_flags & .compute_bit) 1 else 0;
new_score += if (family.queue_flags & .transfer_bit) 1 else 0;
if (family.queue_flags.graphics_bit) {
var new_score: i32 = 0;
new_score += if (family.queue_flags.compute_bit) @as(i32, 1) else 0;
new_score += if (family.queue_flags.transfer_bit) @as(i32, 1) else 0;
if (new_score > score) {
score = new_score;
index = i;
index = @intCast(u32, i);
}
}
}
@ -179,21 +196,21 @@ const QueueSupport = struct {
return index;
}
fn selectCompFamily(families: []vk.QueueFamilyProperties) i32 {
var index: ?i32 = null;
var score: u32 = 0;
fn selectCompFamily(families: []vk.QueueFamilyProperties) ?u32 {
var index: ?u32 = null;
var score: i32 = 0;
for (families) |family, i| {
if (family.queue_count == 0) {
continue;
}
if (family.queue_flags & .compute_bit) {
var new_score: u32 = 0;
new_score += if (family.queue_flags & .graphics_bit) 1 else 0;
new_score += if (family.queue_flags & .transfer_bit) 1 else 0;
if (family.queue_flags.compute_bit) {
var new_score: i32 = 0;
new_score += if (family.queue_flags.graphics_bit) @as(i32, 1) else 0;
new_score += if (family.queue_flags.transfer_bit) @as(i32, 1) else 0;
if (new_score > score) {
score = new_score;
index = i;
index = @intCast(u32, i);
}
}
}
@ -201,21 +218,21 @@ const QueueSupport = struct {
return index;
}
fn selectXferFamily(families: []vk.QueueFamilyProperties) i32 {
var index: ?i32 = null;
var score: u32 = 0;
fn selectXferFamily(families: []vk.QueueFamilyProperties) ?u32 {
var index: ?u32 = null;
var score: i32 = 0;
for (families) |family, i| {
if (family.queue_count == 0) {
continue;
}
if (family.queue_flags & .transfer_bit) {
var new_score: u32 = 0;
new_score += if (family.queue_flags & .graphics_bit) 1 else 0;
new_score += if (family.queue_flags & .compute_bit) 1 else 0;
if (family.queue_flags.transfer_bit) {
var new_score: i32 = 0;
new_score += if (family.queue_flags.graphics_bit) @as(i32, 1) else 0;
new_score += if (family.queue_flags.compute_bit) @as(i32, 1) else 0;
if (new_score > score) {
score = new_score;
index = i;
index = @intCast(u32, i);
}
}
}
@ -223,19 +240,20 @@ const QueueSupport = struct {
return index;
}
fn selectPresFamily(phdev: vk.PhysicalDevice, surf: vk.SurfaceKHR, families: []vk.QueueFamilyProperties) !i32 {
var index: ?i32 = null;
var score = 0;
fn selectPresFamily(instance: *const Instance, phdev: vk.PhysicalDevice, surf: vk.SurfaceKHR, families: []vk.QueueFamilyProperties) !?u32 {
var index: ?u32 = null;
var score: i32 = 0;
for (families) |family, i| {
const presentable = try vkd.getPhysicalDeviceSurfaceSupportKHR(phdev, i, surf);
const result = try instance.dispatch.getPhysicalDeviceSurfaceSupportKHR(phdev, @intCast(u32, i), surf);
const presentable = result != 0;
if (presentable) {
var new_score: u32 = 0;
new_score += if (family.queue_flags & .graphics_bit) 1 else 0;
new_score += if (family.queue_flags & .compute_bit) 1 else 0;
new_score += if (family.queue_flags & .transfer_bit) 1 else 0;
var new_score: i32 = 0;
new_score += if (family.queue_flags.graphics_bit) @as(i32, 1) else 0;
new_score += if (family.queue_flags.compute_bit) @as(i32, 1) else 0;
new_score += if (family.queue_flags.transfer_bit) @as(i32, 1) else 0;
if (new_score > score) {
score = new_score;
index = i;
index = @intCast(u32, i);
}
}
}
@ -244,72 +262,73 @@ const QueueSupport = struct {
}
};
const Queue = packed struct {
handle: vk.Queue,
family: i32,
index: i32,
access_mask: vk.AccessFlags,
stage_mask: vk.PipelineStageFlags,
queueId: u4,
gfx: bool,
comp: bool,
xfer: bool,
pres: bool,
pub const Queue = struct {
handle: vk.Queue = undefined,
family: u32 = 0,
index: QueueId = 0,
access_mask: vk.AccessFlags = .{},
stage_mask: vk.PipelineStageFlags = .{},
queueId: u4 = 0,
gfx: bool = false,
comp: bool = false,
xfer: bool = false,
pres: bool = false,
cmd_pool: vk.CommandPool,
cmds: [Renderer.cmds_per_queue]vk.CommandBuffer,
cmd_fences: [Renderer.cmds_per_queue]vk.Fence,
cmd_ids: [Renderer.cmds_per_queue]u32,
head: u32,
tail: u32,
cmd_pool: Command.Pool = undefined,
cmds: [settings.cmds_per_queue]vk.CommandBuffer = undefined,
cmd_fences: [settings.cmds_per_queue]sync.Fence = undefined,
cmd_ids: [settings.cmds_per_queue]u32 = undefined,
head: u32 = 0,
tail: u32 = 0,
const Self = @This();
pub fn init(self: *Self, device: dev.Device, support: *QueueSupport, id: i32) !void {
const family = support.family[id];
const index = support.index[id];
assert(family >= 0);
assert(index >= 0);
pub fn init(device: *const Device, support: *const Support, id: usize) !Self {
const family = support.families[id] orelse unreachable; // fix this silliness
const index = support.indices[id];
const handle = device.dispatch.getDeviceQueue(device.handle, family, @enumToInt(index));
const handle = try vkd.getDeviceQueue(device, family, index);
var self = Self{
.family = family,
.index = index,
.handle = handle,
};
self.family = family;
self.index = index;
self.handle = handle;
const pres_family = support.families[@enumToInt(QueueId.present)];
const queue_flags = support.properties[@enumToInt(QueueId.present)].queue_flags;
const pres_family = support.family[QueueId.present];
const queue_flags = support.properties.queue_flags;
if (queue_flags & .graphics_bit) {
if (queue_flags.graphics_bit) {
self.gfx = true;
self.stage_mask |= graphics_stages;
self.access_mask |= graphics_access;
self.stage_mask = self.stage_mask.merge(graphics_stages);
self.access_mask = self.access_mask.merge(graphics_access);
}
if (queue_flags & .compute_bit) {
if (queue_flags.compute_bit) {
self.comp = true;
self.stage_mask |= compute_stages;
self.access_mask |= compute_access;
self.stage_mask = self.stage_mask.merge(compute_stages);
self.access_mask = self.access_mask.merge(compute_access);
}
if (queue_flags & .transfer_bit) {
if (queue_flags.transfer_bit) {
self.xfer = true;
self.stage_mask |= transfer_stages;
self.access_mask |= transfer_access;
self.stage_mask = self.stage_mask.merge(transfer_stages);
self.access_mask = self.access_mask.merge(transfer_access);
}
if (family == pres_family) {
self.pres = true;
self.stage_mask |= present_stages;
self.access_mask |= present_access;
self.stage_mask = self.stage_mask.merge(present_stages);
self.access_mask = self.access_mask.merge(present_access);
}
assert(self.stage_mask != 0);
assert(self.access_mask != 0);
assert(self.stage_mask.toInt() != 0);
assert(self.access_mask.toInt() != 0);
try Command.init(self, id);
try Command.init(device, &self, @intToEnum(QueueId, id));
return self;
}
pub fn deinit(self: *Self) void {
Command.deinit(self);
pub fn deinit(self: *const Self, device: *const Device) void {
Command.deinit(device, self);
}
};

View File

@ -17,6 +17,6 @@ pub const desired_swapchain_len = 2;
pub const resource_sets = 2;
pub const cmds_per_queue = 64;
pub fn OnlyIf(comptime setting: bool, t: anytype) t {
pub fn OnlyIf(comptime setting: bool, t: anytype) type {
return if (setting) t else void;
}

View File

@ -0,0 +1,298 @@
const std = @import("std");
const vk = @import("vulkan");
const math = @import("std").math;
const profiler = @import("../../common/profiler.zig");
const settings = @import("settings.zig");
const Instance = @import("instance.zig").Instance;
const Device = @import("device.zig").Device;
const Window = @import("display.zig").Window;
const sync = @import("sync.zig");
const queues = @import("queues.zig");
const Image = @import("image.zig").Image;
// TODO memory
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
// Present modes in preference order: low-latency modes first, plain FIFO
// (the only mode Vulkan guarantees) last.
const preferred_present_modes = [_]vk.PresentModeKHR{
    .mailbox_khr,
    .immediate_khr,
    .fifo_relaxed_khr,
    .fifo_khr,
};
// HDR surface formats, tried first when HDR output is enabled.
// PQ Rec2100
// https://en.wikipedia.org/wiki/Rec._2100
// https://en.wikipedia.org/wiki/High-dynamic-range_video#Perceptual_quantizer
const hdr_surface_formats = [_]vk.SurfaceFormatKHR{
    .{ .format = .a2r10g10b10_unorm_pack32, .color_space = .hdr10_st2084_ext },
    .{ .format = .a2b10g10r10_unorm_pack32, .color_space = .hdr10_st2084_ext },
    // 10-bit sRGB
    .{ .format = .a2r10g10b10_unorm_pack32, .color_space = .srgb_nonlinear_khr },
    .{ .format = .a2b10g10r10_unorm_pack32, .color_space = .srgb_nonlinear_khr },
};
// 8 bit sRGB fallbacks, always acceptable.
const sdr_surface_formats = [_]vk.SurfaceFormatKHR{
    .{ .format = .r8g8b8a8_srgb, .color_space = .srgb_nonlinear_khr },
    .{ .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr },
};
// BUG FIX: the original array used `if (settings.hdr_on) .{...} else void`
// entries, which does not compile (`void` is a type, not a SurfaceFormatKHR).
// Build the preference list at comptime instead; both branches coerce to a
// const slice via the result type.
const preferred_surface_formats: []const vk.SurfaceFormatKHR = if (settings.hdr_on)
    &(hdr_surface_formats ++ sdr_surface_formats)
else
    &sdr_surface_formats;
/// Surface support queried for a device/window pair: capabilities plus the
/// supported surface formats and present modes. Caller owns the slices and
/// must call deinit().
const Support = struct {
    const Self = @This();
    capabilities: vk.SurfaceCapabilitiesKHR,
    formats: []vk.SurfaceFormatKHR,
    modes: []vk.PresentModeKHR,
    /// Queries surface capabilities, formats and present modes.
    pub fn init(instance: *const Instance, device: *const Device, window: *const Window) !Self {
        const capabilities = try instance.dispatch.getPhysicalDeviceSurfaceCapabilitiesKHR(device.physical_device, window.surface);
        var format_count: u32 = undefined;
        _ = try instance.dispatch.getPhysicalDeviceSurfaceFormatsKHR(device.physical_device, window.surface, &format_count, null);
        const formats = try allocator.alloc(vk.SurfaceFormatKHR, format_count);
        errdefer allocator.free(formats);
        _ = try instance.dispatch.getPhysicalDeviceSurfaceFormatsKHR(device.physical_device, window.surface, &format_count, formats.ptr);
        var mode_count: u32 = undefined;
        _ = try instance.dispatch.getPhysicalDeviceSurfacePresentModesKHR(device.physical_device, window.surface, &mode_count, null);
        // BUG FIX: was allocated with format_count, which under-allocates
        // (the driver then writes past the end) whenever mode_count differs.
        const modes = try allocator.alloc(vk.PresentModeKHR, mode_count);
        errdefer allocator.free(modes);
        _ = try instance.dispatch.getPhysicalDeviceSurfacePresentModesKHR(device.physical_device, window.surface, &mode_count, modes.ptr);
        return Self{
            .capabilities = capabilities,
            .formats = formats,
            .modes = modes,
        };
    }
    pub fn deinit(self: *const Self) void {
        allocator.free(self.formats);
        // BUG FIX: modes was previously leaked.
        allocator.free(self.modes);
    }
    /// Returns the first preferred surface format the surface supports,
    /// falling back to the first supported format.
    pub fn selectSwapFormat(self: *const Self) !vk.SurfaceFormatKHR {
        // BUG FIX: the condition was inverted (`> 0` returned the error),
        // so every successful query produced error.NoSurfaceFormats.
        if (self.formats.len == 0) {
            return error.NoSurfaceFormats;
        }
        for (preferred_surface_formats) |preferred_format| {
            for (self.formats) |format| {
                if (std.meta.eql(format, preferred_format)) {
                    return format;
                }
            }
        }
        return self.formats[0];
    }
    /// Returns the first preferred present mode the surface supports,
    /// falling back to the first supported mode.
    pub fn selectSwapMode(self: *const Self) !vk.PresentModeKHR {
        if (self.modes.len <= 0) {
            return error.NoPresentModes;
        }
        for (preferred_present_modes) |preferred_mode| {
            for (self.modes) |mode| {
                if (preferred_mode == mode) {
                    return mode;
                }
            }
        }
        return self.modes[0];
    }
    /// Picks the swapchain extent: the surface's current extent when the
    /// surface fixes it, otherwise the window size clamped to the surface's
    /// min/max image extent.
    pub fn selectSwapExtent(self: *const Self, window: *const Window) vk.Extent2D {
        // 0xFFFFFFFF width means the extent is determined by the swapchain.
        if (self.capabilities.current_extent.width != ~@as(u32, 0)) {
            return self.capabilities.current_extent;
        }
        const min_ext = self.capabilities.min_image_extent;
        const max_ext = self.capabilities.max_image_extent;
        return vk.Extent2D{
            .width = math.clamp(window.size.width, min_ext.width, max_ext.width),
            .height = math.clamp(window.size.height, min_ext.height, max_ext.height),
        };
    }
};
/// Identifies a command submission on one of the queues. `counter` is the
/// per-queue submission id; `valid` distinguishes a real submission from the
/// zero value.
pub const SubmitId = packed struct {
    counter: u32,
    queue_id: queues.QueueId,
    valid: bool,
    const pm_wait = profiler.ProfileMark.init("swapchain.SubmitId.wait");
    /// Blocks until this submission has retired, retiring (fence-wait +
    /// command-buffer reset) every older in-flight submission on the same
    /// queue along the way.
    pub fn wait(self: *const SubmitId, device: *const Device) void {
        pm_wait.begin() catch {};
        defer pm_wait.end();
        std.debug.assert(self.valid);
        const queue = queues.get(self.queue_id);
        // Assumes cmds.len is a power of two so the ring index can be masked.
        const ring_mask = queue.cmds.len - 1;
        // NOTE(review): the loop condition relies on the wrapping distance
        // between counter and tail — verify behavior once ids can wrap.
        while ((self.counter - queue.tail) < queue.cmds.len) : (queue.tail += 1) {
            const i = queue.tail & ring_mask;
            std.debug.assert(queue.cmd_ids[i] == queue.tail);
            queue.cmd_fences[i].wait(device) catch {};
            device.dispatch.resetCommandBuffer(queue.cmds[i], .{}) catch {};
            queue.cmd_ids[i] = 0;
        }
    }
};
pub const Swapchain = struct {
const Self = @This();
handle: vk.SwapchainKHR,
color_format: vk.Format,
color_space: vk.ColorSpaceKHR,
mode: vk.PresentModeKHR,
width: u32,
height: u32,
length: u32,
image_index: usize = 0,
image_submits: [settings.max_swapchain_len]SubmitId = undefined,
images: [settings.max_swapchain_len]Image = undefined,
sync_index: usize = 0,
sync_submits: [settings.resource_sets]SubmitId = undefined,
available_semas: [settings.resource_sets]sync.Semaphore = undefined,
rendered_semas: [settings.resource_sets]sync.Semaphore = undefined,
pub fn init(instance: *const Instance, window: *const Window, device: *const Device, previous: ?*Self) !Self {
const support = try Support.init(instance, device, window);
defer support.deinit();
const queue_support = try queues.Support.init(instance, device.physical_device, window);
defer queue_support.deinit();
const format = try support.selectSwapFormat();
const mode = try support.selectSwapMode();
const ext = support.selectSwapExtent(window);
var img_count = math.clamp(settings.desired_swapchain_len, support.capabilities.min_image_count, math.min(settings.max_swapchain_len, support.capabilities.max_image_count));
const families = [_]u32{
queue_support.families[@enumToInt(queues.QueueId.graphics)] orelse unreachable,
queue_support.families[@enumToInt(queues.QueueId.present)] orelse unreachable,
};
const concurrent = families[0] != families[1];
const usage: vk.ImageUsageFlags = .{ .color_attachment_bit = true };
const swap_info = vk.SwapchainCreateInfoKHR{
.s_type = .swapchain_create_info_khr,
.flags = .{},
.surface = window.surface,
.present_mode = mode,
.min_image_count = img_count,
.image_format = format.format,
.image_color_space = format.color_space,
.image_extent = ext,
.image_array_layers = 1,
.image_usage = usage,
.image_sharing_mode = if (concurrent) .concurrent else .exclusive,
.queue_family_index_count = if (concurrent) families.len else 0,
.p_queue_family_indices = &families,
.pre_transform = support.capabilities.current_transform,
// no compositing with window manager / desktop background
.composite_alpha = .{ .opaque_bit_khr = true },
// don't render pixels behind other windows
.clipped = 1,
// prev swapchain, if recreating
.old_swapchain = if (previous) |p| p.handle else .null_handle,
};
const handle = try device.dispatch.createSwapchainKHR(device.handle, &swap_info, null);
errdefer device.dispatch.destroySwapchainKHR(device.handle, handle, null);
if (previous == null) {
std.debug.print("Present mode: '{s}'", .{@tagName(mode)});
std.debug.print("Present extent: '{} x {}'", .{ ext.width, ext.height });
std.debug.print("Present images: '{}'", .{img_count});
std.debug.print("Present sharing mode: '{s}'", .{if (concurrent) @as([]const u8, "Concurrent") else "Exclusive"});
std.debug.print("Color space: '{s}'", .{@tagName(format.color_space)});
std.debug.print("Format: '{s}'", .{@tagName(format.format)});
}
var images: [settings.max_swapchain_len]vk.Image = undefined;
_ = try device.dispatch.getSwapchainImagesKHR(device.handle, handle, &img_count, null);
if (img_count > settings.max_swapchain_len) {
return error.TooManyImages;
}
_ = try device.dispatch.getSwapchainImagesKHR(device.handle, handle, &img_count, &images);
var self = Self{
.handle = handle,
.mode = mode,
.color_format = format.format,
.color_space = format.color_space,
.width = ext.width,
.height = ext.height,
.length = img_count,
};
for (images) |img, i| {
try Image.import(&self.images[i], device, &.{
.s_type = .image_create_info,
.flags = .{},
.image_type = .@"2d",
.format = self.color_format,
.extent = .{
.width = ext.width,
.height = ext.height,
.depth = 1,
},
.mip_levels = 1,
.array_layers = 1,
.samples = .{ .@"1_bit" = true },
.tiling = .optimal,
.usage = usage,
.sharing_mode = swap_info.image_sharing_mode,
.queue_family_index_count = swap_info.queue_family_index_count,
.p_queue_family_indices = swap_info.p_queue_family_indices,
.initial_layout = .@"undefined",
}, img);
}
{
var i: usize = 0;
while (i < settings.resource_sets) : (i += 1) {
self.available_semas[i] = try sync.Semaphore.init(device);
self.rendered_semas[i] = try sync.Semaphore.init(device);
}
}
return self;
}
/// Tear down all swapchain-owned GPU resources: per-frame semaphores and
/// the imported swapchain images. Waits for the device to go idle first so
/// nothing is destroyed while still in use.
pub fn deinit(self: *const Self, device: *const Device) void {
    // Deallocation must succeed, so a failed idle-wait is deliberately ignored.
    device.waitIdle() catch {};

    // Release the per-slot synchronization primitives pairwise.
    var i: usize = 0;
    while (i < self.available_semas.len) : (i += 1) {
        self.available_semas[i].deinit(device);
        self.rendered_semas[i].deinit(device);
    }

    // Release the images imported from the swapchain.
    for (self.images) |img| {
        img.deinit(device);
    }
    // NOTE(review): the swapchain handle itself is not destroyed here, while
    // init() pairs createSwapchainKHR with an errdefer destroySwapchainKHR —
    // confirm an owner destroys the handle elsewhere or teardown leaks it.
}
const pm_acquiresync = profiler.ProfileMark.init("Swapchain.acquireSync");

/// Advance the ring of in-flight sync slots by one, blocking until the GPU
/// has finished any submit still occupying that slot, then record and
/// return the new slot index.
pub fn acquireSync(self: *Self, device: *const Device) !usize {
    try pm_acquiresync.begin();
    defer pm_acquiresync.end();

    // Ring-buffer step: wrap around the fixed number of in-flight submits.
    const next = (self.sync_index + 1) % self.sync_submits.len;

    // If this slot still has work in flight, wait for it to drain before reuse.
    const pending = self.sync_submits[next];
    if (pending.valid) {
        pending.wait(device);
    }

    self.sync_index = next;
    return next;
}
};

View File

@ -1,6 +1,7 @@
const assert = @import("std").debug.assert;
const std = @import("std");
const assert = std.debug.assert;
const vk = @import("vulkan");
const vkd = @import("device.zig").DeviceDispatch;
const Device = @import("device.zig").Device;
const Renderer = @import("Renderer.zig");
@ -10,11 +11,10 @@ pub const Semaphore = struct {
handle: vk.Semaphore,
pub fn init(device: *Device) !Self {
assert(device.dev);
const handle = try vkd.createSemaphore(device.dev, .{
pub fn init(device: *const Device) !Self {
const handle = try device.dispatch.createSemaphore(device.handle, &.{
.s_type = .semaphore_create_info,
.flags = .{},
}, null);
return Self{
@ -22,8 +22,8 @@ pub const Semaphore = struct {
};
}
pub fn deinit(self: Self, device: *Device) void {
vkd.destroySemaphore(device.dev, self.handle, null);
pub fn deinit(self: Self, device: *const Device) void {
device.dispatch.destroySemaphore(device.handle, self.handle, null);
}
};
@ -31,45 +31,38 @@ pub const Fence = struct {
const Self = @This();
pub const State = enum(i32) {
signaled = vk.Result.success,
unsignaled = vk.Result.not_ready,
signaled = @enumToInt(vk.Result.success),
unsignaled = @enumToInt(vk.Result.not_ready),
};
handle: vk.Fence,
pub fn init(device: *Device, signaled: bool) !Self {
assert(device.dev);
const handle = try vkd.createFence(device.dev, .{
.s_type = .fence_create_info,
.flags = if (signaled) .signaled_bit else void,
}, null);
pub fn init(device: *const Device, signaled: bool) !Self {
const handle = try device.dispatch.createFence(device.handle, &.{ .s_type = .fence_create_info, .flags = .{
.signaled_bit = if (signaled) true else false,
} }, null);
return Self{
.handle = handle,
};
}
pub fn deinit(self: *Self, device: *Device) void {
vkd.destroyFence(device.dev, self.handle, null);
pub fn deinit(self: *const Self, device: *const Device) void {
device.dispatch.destroyFence(device.handle, self.handle, null);
}
pub fn reset(self: *Self, device: *Device) !void {
assert(device.dev);
try vkd.resetFences(device.dev, 1, &self.handle);
pub fn reset(self: *const Self, device: *const Device) !void {
try device.dispatch.resetFences(device.handle, 1, &self.handle);
}
pub fn wait(self: *Self, device: *Device) !void {
assert(device.dev);
const timeout: u64 = -1;
while (self.stat() != .signaled) {
try vkd.waitForFences(device.dev, 1, &self.handle, false, timeout);
pub fn wait(self: *const Self, device: *const Device) !void {
const timeout = std.math.maxInt(u64);
while ((try self.stat(device)) != .signaled) {
_ = try device.dispatch.waitForFences(device.handle, 1, @ptrCast([*]const vk.Fence, &self.handle), vk.FALSE, timeout);
}
}
pub fn stat(self: *Self, device: *Device) !State {
assert(device.dev);
return try vkd.getFenceStatus(device.dev, self.handle);
pub fn stat(self: *const Self, device: *const Device) !State {
return @intToEnum(State, @enumToInt(try device.dispatch.getFenceStatus(device.handle, self.handle)));
}
};