forked from vv/efemra
1
0
Fork 0

Continuing to add functionality; most recently the swapchain (WIP).

This commit is contained in:
Vivianne 2022-07-06 01:57:21 -07:00
parent f467db6b89
commit c767da6fe2
20 changed files with 1615 additions and 213 deletions

View File

@ -2,8 +2,7 @@ const std = @import("std");
const glfw = @import("glfw");
const vk = @import("vulkan");
const resources = @import("resources");
const GraphicsContext = @import("render/graphics_context.zig").GraphicsContext;
const VulkanRenderer = @import("render/vulkan_renderer.zig").VulkanRenderer;
const Renderer = @import("rendering/vulkan/Renderer.zig");
// TODO:
const Allocator = std.mem.Allocator;
@ -20,16 +19,15 @@ pub fn main() !void {
defer window.destroy();
// temp allocator
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
//var gpa = std.heap.GeneralPurposeAllocator(.{}){};
//defer _ = gpa.deinit();
//const allocator = gpa.allocator();
const gc = try GraphicsContext.init(allocator, app_name, &window);
defer gc.deinit();
var renderer = try Renderer.init();
defer renderer.deinit();
std.debug.print("Using device: {s}\n", .{gc.deviceName()});
std.debug.print("Using device: {s}\n", .{renderer.device.getName()});
var renderer = try VulkanRenderer.init(&gc, allocator, extent, &window);
defer renderer.deinit();
while (!window.shouldClose()) {

View File

@ -0,0 +1,169 @@
const assert = @import("std").debug.assert;
const vk = @import("vulkan");
const vkd = @import("device.zig").DeviceDispatch;
const Device = @import("device.zig").Device;
const ProfileMark = @import("profiler").ProfileMark;
const Renderer = @import("Renderer.zig");
const Fence = @import("sync.zig").Fence;
const queues = @import("queues.zig");
/// Mask of every write-type access flag, used to classify barrier sides.
/// BUG FIX: vulkan-zig models VkAccessFlags as a packed struct of bools, so
/// the mask must be a struct literal — `|` on enum literals does not compile.
const write_access = vk.AccessFlags{
    .shader_write_bit = true,
    .color_attachment_write_bit = true,
    .depth_stencil_attachment_write_bit = true,
    .transfer_write_bit = true,
    .host_write_bit = true,
    .memory_write_bit = true,
    .transform_feedback_write_bit_ext = true,
    .transform_feedback_counter_write_bit_ext = true,
    .acceleration_structure_write_bit_khr = true,
    .command_preprocess_write_bit_nv = true,
};
/// Mask of every read-type access flag, used to classify barrier sides.
/// BUG FIX: struct-literal form (see write_access), and the original listed
/// `.shader_read_bit` twice.
const read_access = vk.AccessFlags{
    .indirect_command_read_bit = true,
    .index_read_bit = true,
    .vertex_attribute_read_bit = true,
    .uniform_read_bit = true,
    .input_attachment_read_bit = true,
    .shader_read_bit = true,
    .color_attachment_read_bit = true,
    .depth_stencil_attachment_read_bit = true,
    .transfer_read_bit = true,
    .host_read_bit = true,
    .memory_read_bit = true,
    .transform_feedback_counter_read_bit_ext = true,
    .conditional_rendering_read_bit_ext = true,
    .color_attachment_read_noncoherent_bit_ext = true,
    .acceleration_structure_read_bit_khr = true,
    .fragment_density_map_read_bit_ext = true,
    .fragment_shading_rate_attachment_read_bit_khr = true,
    .command_preprocess_read_bit_nv = true,
};
const pm_init = ProfileMark("Command.init");

/// Creates the command pool, command buffers, and per-slot fences for `queue`.
/// `queue` must be the queue registered under `id` and must not already own a
/// pool.
/// NOTE(review): takes `queue` by pointer — the original took it by value but
/// mutates it, which cannot compile; confirm against the callers.
pub fn init(device: *Device, queue: *queues.Queue, id: queues.QueueId) !void {
    pm_init.start();
    defer pm_init.end();

    // BUG FIX: vk handles are not booleans; compare against .null_handle.
    assert(queue.handle != .null_handle);
    assert(queue == queues.get(id));
    assert(device.dev != .null_handle);
    assert(queue.cmd_pool.handle == .null_handle);

    queue.queue_id = id;
    // BUG FIX: Pool.init takes the device first, and the flags type is a
    // packed struct, not an enum literal.
    queue.cmd_pool = try Pool.init(device, queue.family, .{ .reset_command_buffer_bit = true });
    errdefer queue.cmd_pool.deinit(device);

    // BUG FIX: dispatch entry is `allocateCommandBuffers` (capital B), and
    // the create-info wants the raw pool handle.
    queue.cmds = try vkd.allocateCommandBuffers(device.dev, .{
        .s_type = .command_buffer_allocate_info,
        .command_pool = queue.cmd_pool.handle,
        .level = .primary,
        .command_buffer_count = queue.cmds.len,
    });
    for (queue.cmds) |_, i| {
        // Fences start signaled so the first acquire of each ring slot
        // does not block.
        queue.cmd_fences[i] = try Fence.init(true);
        queue.cmd_ids[i] = 0;
    }
}
const pm_deinit = ProfileMark("Command.deinit");

/// Destroys the per-slot fences and the command pool owned by `queue`.
/// Must only run after the device has gone idle.
pub fn deinit(queue: *queues.Queue, device: *Device) void {
    // BUG FIX: the original started/ended pm_init here (copy-paste slip).
    pm_deinit.start();
    defer pm_deinit.end();

    // BUG FIX: vk.Device is a handle, not a bool.
    assert(device.dev != .null_handle);
    for (queue.cmd_fences) |fence| {
        fence.deinit();
    }
    // BUG FIX: Pool.deinit requires the device (see Pool below).
    queue.cmd_pool.deinit(device);
}
pub const Buffer = packed struct {
    const Self = @This();

    handle: vk.CommandBuffer,
    // NOTE(review): typed vk.Fence but assigned from queue.cmd_fences, which
    // init() fills with sync.zig Fence wrappers — confirm which type is meant.
    fence: vk.Fence,
    id: u32,
    queue_id: queues.QueueId,

    // indicates which types of cmds are legal in this cmdbuf
    gfx: bool,
    comp: bool,
    xfer: bool,
    pres: bool,

    // cmd state
    began: bool,
    ended: bool,
    submit: bool,
    is_render_pass: bool,
    subpass: u8,
    queue_transfer_src: bool,
    queue_transfer_dst: bool,

    /// Acquires the next command-buffer slot from the queue's ring, waiting
    /// on the tail fence when the ring is full.
    /// BUG FIXES: `queue` is taken by pointer (the original took it by value
    /// yet mutates head/tail), and the slot arrays are `cmd_fences`/`cmd_ids`
    /// — the names the file-level init() actually populates — not
    /// `fences`/`ids`.
    pub fn init(self: *Self, queue: *queues.Queue) !void {
        // Ring indexing relies on cmds.len being a power of two; the C
        // original asserted (len & (len - 1)) == 0 at compile time.
        const ring_mask: u32 = queue.cmds.len - 1;

        const head_id = queue.head;
        queue.head += 1;

        // Retire tail slots until the new head fits inside the ring.
        while (head_id - queue.tail >= queue.cmds.len) {
            const tail_slot = queue.tail & ring_mask;
            assert(queue.cmd_ids[tail_slot] == queue.tail);
            const tail_fence = queue.cmd_fences[tail_slot];
            try tail_fence.wait();
            queue.cmd_ids[tail_slot] = 0;
            queue.tail += 1;
        }

        const head_slot = head_id & ring_mask;
        // BUG FIX: `!integer` does not compile; an empty slot holds id 0.
        assert(queue.cmd_ids[head_slot] == 0);
        queue.cmd_ids[head_slot] = head_id;

        const cmd = queue.cmds[head_slot];
        const fence = queue.cmd_fences[head_slot];
        assert(cmd != .null_handle);
        assert(fence.stat() == .signaled);
        fence.reset();

        self.handle = cmd;
        self.fence = fence;
        self.id = head_id;
        self.queue_id = queue.queue_id;
        self.gfx = queue.gfx;
        self.comp = queue.comp;
        self.xfer = queue.xfer;
        self.pres = queue.pres;
    }
};
pub const Pool = struct {
    const Self = @This();

    handle: vk.CommandPool,

    /// Creates a command pool for the given queue family.
    /// BUG FIX: `family` was typed `vk.QueueFamilyProperties` but is used as
    /// the family *index* (`queue_family_index`); changed to `u32`, which is
    /// what callers pass (`queue.family`).
    pub fn init(device: *Device, family: u32, flags: vk.CommandPoolCreateFlags) !Self {
        const handle = try vkd.createCommandPool(device.dev, &.{
            .s_type = .command_pool_create_info,
            .flags = flags,
            .queue_family_index = family,
        }, null);
        return Self{
            .handle = handle,
        };
    }

    /// Destroys the pool; its buffers must not be in flight.
    pub fn deinit(self: *Self, device: *Device) void {
        vkd.destroyCommandPool(device.dev, self.handle, null);
    }

    /// Recycles all buffers allocated from this pool.
    /// BUG FIX: vulkan-zig takes the Flags packed struct, not FlagBits.
    pub fn reset(self: *Self, device: *Device, flags: vk.CommandPoolResetFlags) !void {
        try vkd.resetCommandPool(device.dev, self.handle, flags);
    }
};

View File

@ -0,0 +1,38 @@
const std = @import("std");
const vk = @import("vulkan");
const settings = @import("settings.zig");
const Instance = @import("instance.zig").Instance;
const Self = @This();
messenger: vk.DebugUtilsMessengerCreateInfoEXT,
/// Creates the debug-utils messenger when `settings.messenger_on` is set.
/// NOTE(review): `self` is declared `const` but assigned below, and
/// `vk.CreateDebugUtilsCreateMessengerEXT` does not look like a real
/// vulkan-zig symbol (expected `createDebugUtilsMessengerEXT` on the
/// instance dispatch); the flag fields also use `|` on enum literals,
/// which packed-struct flags do not support. WIP — cannot compile as-is.
pub fn init(instance: *Instance) !Self {
    const self = Self{};
    if (settings.messenger_on) {
        // Severity/type masks: info and up, all message categories.
        self.messenger = try vk.CreateDebugUtilsCreateMessengerEXT(instance.vkInst, .{
            .s_type = .debug_utils_messenger_create_info_ext,
            .message_severity = .error_bit_ext | .warning_bit_ext | .info_bit_ext,
            .message_type = .general_bit_ext | .validation_bit_ext | .performance_bit_ext,
            .pfn_user_callback = onVulkanMessage,
        }, null);
    }
    return self;
}
/// Destroys the messenger created by `init`; no-op when the messenger
/// feature is compiled out.
pub fn deinit(self: *Self, instance: *Instance) void {
    if (!settings.messenger_on) return;
    vk.destroyDebugUtilsMessengerEXT(instance.vkInst, self.messenger, null);
}
/// Vulkan debug-utils callback: logs the validation message to stderr.
/// Returns false (VK_FALSE) as the spec requires, so the triggering Vulkan
/// call is not aborted.
fn onVulkanMessage(message_severity: vk.DebugUtilsMessengerSeverityFlagBitsEXT, message_type: vk.DebugUtilsMessageTypeFlagsEXT, callback_data: *const vk.DebugUtilsMessengerCallbackDataEXT, user_data: *anyopaque) bool {
    //var shouldLog = false;
    // TODO: log severity
    _ = message_severity;
    _ = message_type;
    _ = user_data;
    // BUG FIX: std.debug.print takes a format-args tuple, and the function
    // declares `bool` but returned nothing.
    std.debug.print("vkr: {s}\n", .{callback_data.p_message});
    return false;
}

View File

@ -0,0 +1,66 @@
const std = @import("std");
const vk = @import("vulkan");
const settings = @import("settings");
/// Which device-level extensions the selected GPU provides, one flag per
/// extension. Ray-tracing entries only exist when the build enables RT.
pub const Device = struct {
    khr_swapchain: bool,
    ext_memory_budget: bool,
    ext_hdr_metadata: bool,
    khr_shader_float16_int8: bool,
    khr_16bit_storage: bool,
    khr_push_descriptor: bool,
    ext_memory_priority: bool,
    khr_bind_memory2: bool,
    khr_shader_float_controls: bool,
    khr_spirv_1_4: bool,
    ext_conditional_rendering: bool,
    khr_draw_indirect_count: bool,
    khr_acceleration_structure: if (settings.rt_on) bool else void,
    khr_ray_tracing_pipeline: if (settings.rt_on) bool else void,
    khr_ray_query: if (settings.rt_on) bool else void,
    khr_deferred_host_operations: if (settings.rt_on) bool else void,

    /// Flags each known extension found in `props`.
    pub fn get(props: []vk.ExtensionProperties) @This() {
        return getExtensions(@This(), props);
    }

    /// Returns the extension-name list for device creation; caller frees.
    /// BUG FIX: the original called `toArray` (itself) recursively with the
    /// wrong arity; it meant the file-level `extToArray`.
    pub fn toArray(self: *Device, allocator: std.mem.Allocator) []u8 {
        _ = self;
        return extToArray(Device, allocator);
    }
};
/// Which instance-level extensions are available; the debug-utils entry only
/// exists when the messenger is compiled in.
pub const Instance = struct {
    khr_get_physical_device_properties2: bool,
    ext_swapchain_colorspace: bool,
    ext_debug_utils: if (settings.messenger_on) bool else void,

    /// Flags each known extension found in `props`.
    pub fn get(props: []vk.ExtensionProperties) @This() {
        return getExtensions(@This(), props);
    }

    /// Returns the extension-name list for instance creation; caller frees.
    /// BUG FIX: `extToArray` takes (type, allocator); the original also
    /// passed `self`, which does not match its signature.
    pub fn toArray(self: *Instance, allocator: std.mem.Allocator) []u8 {
        _ = self;
        return extToArray(Instance, allocator);
    }
};
/// Returns a `T` whose bool fields mark which extensions appear in `props`.
/// BUG FIXES: the result must be a mutable `var` (and start zeroed, not
/// `undefined`), the field loop must be `inline for` so `@field` gets a
/// comptime name, the function must actually return, and
/// `std.ascii.eqlIgnoreCase` compares two slices (no type parameter) — the
/// fixed-size extension_name array is first cut at its 0 terminator.
fn getExtensions(comptime T: type, props: []vk.ExtensionProperties) T {
    var ext = std.mem.zeroes(T);
    inline for (@typeInfo(T).Struct.fields) |field| {
        for (props) |prop| {
            const name = std.mem.sliceTo(&prop.extension_name, 0);
            if (std.ascii.eqlIgnoreCase(name, "vk_" ++ field.name)) {
                @field(ext, field.name) = true;
            }
        }
    }
    return ext;
}
/// Builds the list of Vulkan extension-name strings, one per field of `T`;
/// caller owns and frees the returned slice.
/// BUG FIXES: `alloc` can fail and was not `try`-ed, the element type
/// (`[]u8`) did not match the declared return type, and `@field` over
/// struct fields needs `inline for`.
pub fn extToArray(comptime T: type, allocator: std.mem.Allocator) ![][]const u8 {
    const arr = try allocator.alloc([]const u8, @typeInfo(T).Struct.fields.len);
    inline for (@typeInfo(T).Struct.fields) |field, i| {
        const name_info: vk.extension_info.Info = @field(vk.extension_info, field.name);
        arr[i] = name_info.name;
    }
    return arr;
}

View File

@ -0,0 +1,161 @@
const std = @import("std");
const Instance = @import("instance.zig").Instance;
const Display = @import("display.zig").Display;
const Window = @import("display.zig").Window;
const Device = @import("device.zig").Device;
const Memory = @import("memory.zig").Memory;
const Framebuffer = @import("framebuffer.zig").Framebuffer;
const Swapchain = @import("swapchain.zig").Swapchain;
const Context = @import("context.zig").Context;
const MainPass = @import("main_pass.zig").MainPass;
const Command = @import("Command.zig");
instance: Instance,
window: Window,
device: Device,
memory: Memory,
framebuffer: Framebuffer,
swapchain: Swapchain,
context: Context,
// sampler: Sampler, TODO
// texTable: TexTable, TODO
// bindings: Bindings, TODO
// targets: Targets, TODO
// meshSys: MeshSys, TODO
// imSys: ImSys, TODO
mainPass: MainPass,
const Self = @This();
/// Brings up the whole renderer — instance, window, device, memory,
/// framebuffer, swapchain, context and the main pass — in that order.
pub fn init() !Self {
    // BUG FIX: `self` was `const` but every subsystem init mutates it.
    var self: Self = undefined;
    errdefer std.debug.print("failed to init VulkanRenderer", .{});
    // NOTE(review): deinit touches every field, including ones not yet
    // initialized when an early step fails — confirm each subsystem's
    // deinit tolerates that.
    errdefer self.deinit();
    try self.instance.init();
    try windowInit(&self.window);
    try self.device.init();
    try self.memory.init();
    try self.framebuffer.init();
    try self.swapchain.init(null);
    try self.context.init();
    // try self.sampler.init();
    // try self.texTable.init();
    // try self.bindings.init();
    // try self.targets.init();
    // try self.meshSys.init();
    // try self.imSys.init();
    try self.mainPass.init();
    return self;
}
/// Runs one renderer frame: acquire swapchain image, update subsystems,
/// set up and execute the passes, present, then flush background work.
/// NOTE(review): declared to return `bool` but no statement returns a
/// value — cannot compile as written; confirm whether this should be
/// `void` or report frame success.
pub fn update(self: Self) bool {
    // TODO profiling
    // base system update
    {
        self.swapchain.acquireSync();
        self.swapchain.acquireImage();
        self.memory.update();
    }
    // system update
    {
        // TODO: sampler/meshsys/imsys update
    }
    // setup phase
    {
        self.mainPass.setup();
        // TODO textable update
        Command.flush();
        // TODO bindings update
    }
    // execute phase
    self.mainPass.execute();
    // present phase
    self.swapchain.submit(Command.get());
    // background work
    {
        // TODO upload lightmaps, imsys clear
        Command.flush();
    }
}
/// Tears down all renderer subsystems in (roughly) reverse init order,
/// waiting for the GPU to go idle first so nothing is destroyed in flight.
pub fn deinit(self: Self) void {
    self.device.waitIdle();
    // TODO: delete lightmap pack
    self.mainPass.deinit();
    // self.imSys.deinit();
    // self.meshSys.deinit();
    // self.targets.deinit();
    // self.bindings.deinit();
    // self.texTable.deinit();
    // self.sampler.deinit();
    // clear other passes here.
    // NOTE(review): memory gets both finalize() (before context) and
    // deinit() (after window) — confirm this two-stage teardown is intended.
    self.memory.finalize();
    self.context.deinit();
    self.swapchain.deinit();
    self.framebuffer.deinit();
    self.window.deinit();
    self.memory.deinit();
    self.device.deinit();
    self.instance.deinit();
}
/// Creates the main engine window, sized to the display (work area when
/// windowed); fullscreen is not wired to a convar yet.
fn windowInit(window: *Window) !void {
    // TODO: convar cv_fullscreen
    const fullscreen = false;
    const size = try Display.getSize(fullscreen);
    try window.init("efemra", size, fullscreen);
    // TODO: convar r_width/r_height set
    // TODO: UISys_Init
}
/// Destroys the main window (UI shutdown and convar cleanup still TODO).
fn windowDeinit(window: *Window) void {
    // TODO: uisys_shutdown
    // TODO: convar r_width/r_height unset
    window.deinit();
}
/// Per-frame window bookkeeping: errors out when the window closed,
/// recreates the swapchain on resize, and (eventually) handles fullscreen
/// toggles.
fn windowUpdate(self: Self) !void {
    if (!self.window.isOpen()) {
        return error.WindowNotOpen;
    }
    // TODO: convar fullscreen change check
    // BUG FIX: Zig conditions must be `bool`; `if (0)` does not compile.
    // The branch stays comptime-false until the fullscreen convar exists.
    if (false) {
        self.device.waitIdle();
        // TODO: UI pass del
        self.swapchain.deinit();
        self.window.deinit();
        self.window.init();
        self.swapchain.init(null);
        // TODO: UI pass new
    }
    if (self.window.updateSize()) {
        try self.swapchain.recreate();
        // TODO set r_width/r_height convar
    }
    // BUG FIX: vk handles are not booleans; compare against the null handle.
    if (self.swapchain.handle == .null_handle) {
        return error.NoSwapchainHandle;
    }
    self.targets.maybeRecreate();
}

View File

@ -0,0 +1,93 @@
const vk = @import("vulkan");
const math = @import("std").math;
const settings = @import("settings.zig");
const dev = @import("device.zig");
const vkd = dev.DeviceDispatch;
const queues = @import("queues.zig");
const Self = @This();
/// Present modes in preference order: low-latency uncapped modes first,
/// ending with plain FIFO, which the Vulkan spec guarantees is available.
const preferred_present_modes = [_]vk.PresentModeKHR{
    .mailbox_khr,
    .immediate_khr,
    .fifo_relaxed_khr,
    .fifo_khr,
};
// Surface formats in preference order; HDR entries only exist when the build
// enables HDR output.
// BUG FIX: the original used `if (...) ... else void` expressions *inside*
// an array literal (not valid Zig) and named a nonexistent type
// `vk.PreferredSurfaceFormats`. Build the list by comptime concatenation of
// vk.SurfaceFormatKHR instead.
const hdr_surface_formats = if (settings.hdr_on) [_]vk.SurfaceFormatKHR{
    // PQ Rec2100
    // https://en.wikipedia.org/wiki/Rec._2100
    // https://en.wikipedia.org/wiki/High-dynamic-range_video#Perceptual_quantizer
    .{ .format = .a2r10g10b10_unorm_pack32, .color_space = .hdr10_st2084_ext },
    .{ .format = .a2b10g10r10_unorm_pack32, .color_space = .hdr10_st2084_ext },
    // 10-bit sRGB
    .{ .format = .a2r10g10b10_unorm_pack32, .color_space = .srgb_nonlinear_khr },
    .{ .format = .a2b10g10r10_unorm_pack32, .color_space = .srgb_nonlinear_khr },
} else [_]vk.SurfaceFormatKHR{};

const preferred_surface_formats = hdr_surface_formats ++ [_]vk.SurfaceFormatKHR{
    // 8 bit sRGB
    .{ .format = .r8g8b8a8_srgb, .color_space = .srgb_nonlinear_khr },
    .{ .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr },
};
/// Raw surface-support query results: capabilities plus the heap-allocated
/// format and present-mode lists (with their element counts).
const Support = struct {
    caps: vk.SurfaceCapabilitiesKHR,
    formatCount: u32,
    formats: []vk.SurfaceFormatKHR,
    modeCount: u32,
    modes: []vk.PresentModeKHR,
};
/// Queries surface support for a device + window pair.
/// NOTE(review): `init` is an empty stub — it declares `!SwapchainSupport`
/// but returns nothing, so it cannot compile; WIP.
pub const SwapchainSupport = struct {
    pub fn init(device: *dev.Device, window: *dev.Window) !SwapchainSupport {
    }
};
/// Creates the swapchain for `window` on `device`, optionally reusing
/// `previous` while recreating (e.g. on resize).
/// NOTE(review): several identifiers here are undeclared or inconsistent
/// and must be reconciled before this compiles — `dev` vs `device` in the
/// SwapchainSupport.init call, `desired_swapchain_len`/`max_swapchain_len`
/// (not declared in view), `QueueId` (lives in `queues`), `support
/// .capabilities` vs the `caps` field on Support, and `prev` vs the
/// `previous` parameter.
pub fn init(window: *dev.Window, device: *dev.Device, previous: ?*Self) !void {
    const support = try SwapchainSupport.init(dev, window.surface);
    defer support.deinit();
    const queue_support = try queues.QueueSupport.init(device, window);
    defer queue_support.deinit();

    // Pick the best-supported format, present mode and extent.
    const format = support.selectSwapFormat();
    const mode = support.selectSwapMode();
    const ext = support.selectSwapExtent(window);

    // Clamp the requested image count to what the surface allows.
    const img_count = math.clamp(
        desired_swapchain_len,
        support.capabilities.min_image_count,
        math.min(max_swapchain_len, support.capabilities.max_image_count));

    const families = [_]u32{
        queue_support.family[QueueId.graphics],
        queue_support.family[QueueId.present],
    };
    // Images must be shared when graphics and present use different families.
    const concurrent = families[0] != families[1];
    const usage: vk.ImageUsage = .color_attachment_bit;

    const handle = try vkd.createSwapchainKHR(device.dev, .{
        .s_type = .swapchain_create_info_khr,
        .surface = window.surface,
        .present_mode = mode,
        .min_image_count = img_count,
        .image_format = format.format,
        .image_color_space = format.color_space,
        .image_extent = ext,
        .image_array_layers = 1,
        .image_usage = usage,
        .image_sharing_mode = if (concurrent) .concurrent else .exclusive,
        .queue_family_index_count = if (concurrent) families.len else 0,
        .p_queue_family_indices = families,
        .pre_transform = support.capabilities.current_transform,
        // no compositing with window manager / desktop background
        .composite_alpha = .opaque_bit_khr,
        // don't render pixels behind other windows
        .clipped = true,
        // prev swapchain, if recreating
        .old_swapchain = prev orelse null,
    }, null);
    // TODO: use @tagName to get name of enum!
}

View File

@ -0,0 +1,5 @@
const vk = @import("vulkan");
// Loader-level (pre-instance) entry points; only instance creation is needed.
const BaseDispatch = vk.BaseWrapper(.{
    .createInstance = true,
});

View File

@ -0,0 +1,535 @@
const vk = @import("vulkan");
const glfw = @import("glfw");
const log2 = @import("std").math.log2;
const settings = @import("settings.zig");
const OnlyIf = settings.OnlyIf;
const Extensions = @import("Extensions.zig");
const Instance = @import("instance.zig").Instance;
const layers = @import("layers.zig").layers;
const Renderer = @import("Renderer.zig");
const Window = @import("device.zig").Window;
const queues = @import("queues.zig");
const vki = @import("instance.zig").InstanceDispatch;
const std = @import("std");
// TODO memory
// BUG FIX: GeneralPurposeAllocator must be `var` — `.allocator()` takes a
// mutable pointer to the instance, so a `const` GPA cannot hand one out.
// NOTE(review): a file-scope `gpa.allocator()` is evaluated at comptime;
// this probably belongs inside an init function instead — confirm.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
// Device-level function table: every vkd.* entry point this renderer uses
// must be enabled here or the call will not exist on the wrapper.
const DeviceDispatch = vk.DeviceWrapper(.{
    .destroyDevice = true,
    .getDeviceQueue = true,
    .createSemaphore = true,
    .createFence = true,
    .createImageView = true,
    .destroyImageView = true,
    .destroySemaphore = true,
    .destroyFence = true,
    .getSwapchainImagesKHR = true,
    .createSwapchainKHR = true,
    .destroySwapchainKHR = true,
    .acquireNextImageKHR = true,
    .deviceWaitIdle = true,
    .waitForFences = true,
    .resetFences = true,
    .queueSubmit = true,
    .queuePresentKHR = true,
    .createCommandPool = true,
    .destroyCommandPool = true,
    .allocateCommandBuffers = true,
    .freeCommandBuffers = true,
    .queueWaitIdle = true,
    .createShaderModule = true,
    .destroyShaderModule = true,
    .createPipelineLayout = true,
    .destroyPipelineLayout = true,
    .createRenderPass = true,
    .destroyRenderPass = true,
    .createGraphicsPipelines = true,
    .destroyPipeline = true,
    .createFramebuffer = true,
    .destroyFramebuffer = true,
    .beginCommandBuffer = true,
    .endCommandBuffer = true,
    .allocateMemory = true,
    .freeMemory = true,
    .createBuffer = true,
    .destroyBuffer = true,
    .getBufferMemoryRequirements = true,
    .mapMemory = true,
    .unmapMemory = true,
    .bindBufferMemory = true,
    .cmdBeginRenderPass = true,
    .cmdEndRenderPass = true,
    .cmdBindPipeline = true,
    .cmdDraw = true,
    .cmdSetViewport = true,
    .cmdSetScissor = true,
    .cmdBindVertexBuffers = true,
    .cmdCopyBuffer = true,
});
// Shorthand used throughout this file.
const vkd = DeviceDispatch;
/// Cached physical-device properties; the RT members only exist when ray
/// tracing is compiled in and form the query pNext chain.
const Props = struct {
    phdev: vk.PhysicalDeviceProperties2,
    accstr: OnlyIf(settings.rt_on, vk.PhysicalDeviceAccelerationStructurePropertiesKHR),
    rtpipe: OnlyIf(settings.rt_on, vk.PhysicalDeviceRaytracingPipelinePropertiesKHR),

    /// Returns the adapter name as a slice.
    /// BUG FIX: `device_name` lives in the nested `properties` struct of
    /// VkPhysicalDeviceProperties2 and is a fixed-size, zero-terminated
    /// array — slice it up to the terminator instead of returning the array.
    pub fn getName(self: *Props) []u8 {
        return std.mem.sliceTo(&self.phdev.properties.device_name, 0);
    }
};
/// Cached feature-query results; the RT members only exist when ray tracing
/// is compiled in and are chained via pNext during the query.
const Features = struct {
    phdev: vk.PhysicalDeviceFeatures2,
    accstr: OnlyIf(settings.rt_on, vk.PhysicalDeviceAccelerationStructureFeaturesKHR),
    rtpipe: OnlyIf(settings.rt_on, vk.PhysicalDeviceRaytracingPipelineFeaturesKHR),
    rquery: OnlyIf(settings.rt_on, vk.PhysicalDeviceRayQueryFeaturesKHR),
};
/// The selected GPU: logical + physical handles plus everything cached about
/// the device (properties, extensions, features).
const Device = struct {
    const Self = @This();

    dev: vk.Device, // logical device handle
    phdev: vk.PhysicalDevice, // selected physical device
    props: Props, // cached properties (incl. RT props when enabled)
    exts: Extensions.Device, // which device extensions are available
    feats: Features, // cached feature-query results

    /// Creates the logical device and the queues.
    /// NOTE(review): builds a local `const self` that is never returned or
    /// stored (and `createDevice` mutates through it) — WIP; callers cannot
    /// obtain the initialized Device from this as written.
    pub fn init(renderer: *Renderer) !void {
        const self = Self{};
        try self.createDevice(renderer);
        errdefer vkd.destroyDevice(self.dev, null);
        // TODO: volk init?
        try queues.init();
    }
pub fn deinit(self: *Self) void {
queues.deinit();
if (self.dev) {
vkd.destroyDevice(self.dev, null);
}
}
pub fn waitIdle(self: *Self) !void {
try self.dev.waitIdle();
}
    /// Human-readable adapter name, from the cached properties.
    pub fn getName(self: *Self) ![]u8 {
        return self.props.getName();
    }
    /// Per-candidate scoring record used by selectPhysicalDevice to rank GPUs.
    const DeviceScore = struct {
        rt_score: i32, // ray-tracing extension score
        ext_score: i32, // optional-extension score
        feat_score: i32, // feature score
        prop_score: i32, // properties/limits score
        has_required_exts: bool, // hard requirement: must-have extensions
        has_queue_support: bool, // hard requirement: all queue families found
    };
    /// Enumerates all physical devices, gathers properties, features and
    /// extensions for each, scores them, and keeps the best candidate on
    /// `self`. Candidates missing required extensions or queue support are
    /// rejected outright; ties break RT > extensions > features > props.
    /// NOTE(review): several WIP issues to confirm before this compiles —
    /// out-params declared `const ... = undefined` (must be `var`), allocs
    /// missing `try`, by-value loop captures written through (`prop.*`,
    /// `ext.*`), `getPhysicalDeviceProperties2` called off the *device*
    /// dispatch instead of the instance dispatch, `&=` used on bool, `?i32`
    /// indices used directly as slice indices, and `self.dev = pdevs[chosen]`
    /// stores a physical device in the logical-device field (presumably
    /// `self.phdev` was meant).
    fn selectPhysicalDevice(self: *Device, instance: *Instance) !void {
        const device_count: u32 = undefined;
        _ = try vki.enumeratePhysicalDevices(instance.vkInst, &device_count, null);
        const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count);
        defer allocator.free(pdevs);
        _ = try vki.enumeratePhysicalDevices(instance.vkInst, &device_count, pdevs.ptr);

        // Property query, chained phdev -> accstr -> rtpipe.
        const prop_list = try allocator.alloc(Props, device_count);
        defer allocator.free(prop_list);
        for (prop_list) |prop, i| {
            prop.* = .{
                .p_next = &prop.accstr,
                .accstr = .{
                    .s_type = .physical_device_acceleration_structure_properties_khr,
                    .p_next = &prop.rtpipe,
                },
                .rtpipe = .{
                    .s_type = .physical_device_ray_tracing_pipeline_properties_khr,
                },
            };
            vkd.getPhysicalDeviceProperties2(pdevs[i], &prop.phdev);
        }

        // Feature query, chained the same way (RT structs only when enabled).
        const feats_list = allocator.alloc(Features, device_count);
        defer allocator.free(feats_list);
        for (feats_list) |feat, i| {
            feat.phdev.s_type = .physical_device_features_2;
            if (settings.rt_on) {
                feat.phdev.p_next = &feat.accstr;
                feat.accstr.s_type = .physical_device_acceleration_structure_features_khr;
                feat.accstr.p_next = &feat.rtpipe;
                feat.rtpipe.s_type = .physical_device_ray_tracing_pipeline_features_khr;
                feat.rtpipe.p_next = &feat.rquery;
                feat.rquery.s_type = .physical_device_ray_query_features_khr;
            }
            vkd.getPhysicalDeviceFeatures2(pdevs[i], &feat.phdev);
        }

        // Available-extension table per device.
        const exts_list = allocator.alloc(Extensions.dev_s, device_count);
        defer allocator.free(exts_list);
        for (exts_list) |ext, i| {
            const count: u32 = undefined;
            vkd.enumerateDeviceExtensionProperties(pdevs[i], null, &count, null);
            const props = allocator.alloc(vk.ExtensionProperties, count);
            defer allocator.free(props);
            vkd.enumerateDeviceExtensionProperties(pdevs[i], null, &count, props.ptr);
            ext.* = Extensions.Device.get(props);
        }

        // Score every candidate.
        const scores_list = allocator.alloc(DeviceScore, device_count);
        defer allocator.free(scores_list);
        for (scores_list) |_, i| {
            scores_list[i].has_required_exts = hasRequired(&exts_list[i]);
            scores_list[i].rt_score = rtEval(&exts_list[i]);
            scores_list[i].ext_score = extsEval(&exts_list[i]);
            scores_list[i].feat_score = featsEval(&feats_list[i]);
            scores_list[i].prop_score = propsEval(&prop_list[i]);
            const queue_support = try queues.QueueSupport.init(&pdevs[i]);
            defer queue_support.deinit();
            var has_queue_support = true;
            for (queue_support.families) |family| {
                has_queue_support &= family >= 0;
            }
            scores_list[i].has_queue_support = has_queue_support;
        }

        // Pick the best candidate; higher score wins at each tier.
        var chosen_dev: ?i32 = null;
        for (scores_list) |score, i| {
            if (!score.has_required_exts) {
                continue;
            }
            if (!score.has_queue_support) {
                continue;
            }
            if (chosen_dev) |chosen| {
                var cmp = scores_list[chosen].rt_score - score.rt_score;
                if (cmp != 0) {
                    chosen_dev = if (cmp < 0) i else chosen;
                    continue;
                }
                cmp = scores_list[chosen].ext_score - score.ext_score;
                if (cmp != 0) {
                    chosen_dev = if (cmp < 0) i else chosen;
                    continue;
                }
                cmp = scores_list[chosen].feat_score - score.feat_score;
                if (cmp != 0) {
                    chosen_dev = if (cmp < 0) i else chosen;
                    continue;
                }
                cmp = scores_list[chosen].prop_score - score.prop_score;
                if (cmp != 0) {
                    chosen_dev = if (cmp < 0) i else chosen;
                    continue;
                }
            } else {
                chosen_dev = i;
            }
        }

        if (chosen_dev) |chosen| {
            self.props = prop_list[chosen];
            self.feats = feats_list[chosen];
            self.exts = exts_list[chosen];
            self.dev = pdevs[chosen];
        } else unreachable;
    }
fn hasRequired(exts: Extensions) bool {
var has_all = false;
has_all &= exts.khr_swapchain;
return has_all;
}
fn rtEval(exts: *Extensions) i32 {
var score = 0;
score += if (exts.khr_acceleration_structure and exts.khr_ray_tracing_pipeline) 1 else 0;
score += if (exts.khr_ray_query) 1 else 0;
return score;
}
fn extsEval(exts: *Extensions) i32 {
var score = 0;
score += if (exts.ext_memory_budget) 1 else 0;
score += if (exts.ext_hdr_metadata) 1 else 0;
score += if (exts.khr_shader_float16_int8) 1 else 0;
score += if (exts.khr_16bit_storage) 1 else 0;
score += if (exts.khr_push_descriptor) 1 else 0;
score += if (exts.ext_memory_priority) 1 else 0;
score += if (exts.khr_bind_memory2) 1 else 0;
score += if (exts.shader_float_controls) 1 else 0;
score += if (exts.spirv_1_4) 1 else 0;
score += if (exts.conditional_rendering) 1 else 0;
score += if (exts.draw_indirect_count) 1 else 0;
return score;
}
fn featsEval(feats: *Features) i32 {
var score = 0;
// ------------------------------------------------------------------------
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceFeatures.html
// highly useful things
score += if (feats.phdev.features.full_draw_index_uint_32) 16 else 0;
score += if (feats.phdev.features.sampler_anisotropy) 16 else 0;
score += if (feats.phdev.features.texture_compression_bc) 16 else 0;
score += if (feats.phdev.features.independent_blend) 16 else 0;
// debug drawing
score += if (feats.phdev.features.fill_mode_non_solid) 2 else 0;
score += if (feats.phdev.features.wide_lines) 2 else 0;
score += if (feats.phdev.features.large_points) 2 else 0;
// profiling
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkQueryPipelineStatisticFlagBits.html
score += if (feats.phdev.features.pipeline_statistics_query) 2 else 0;
// shader features
score += if (feats.phdev.features.fragment_stores_and_atomics) 4 else 0;
score += if (feats.phdev.features.shader_int_64) 4 else 0;
score += if (feats.phdev.features.shader_int_16) 1 else 0;
score += if (feats.phdev.features.shader_storage_image_extended_formats) 4 else 0;
// dynamic indexing
score += if (feats.phdev.features.shader_uniform_buffer_array_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.shader_storage_buffer_array_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.shader_sampled_imageArray_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.shader_storage_imageArray_dynamic_indexing) 4 else 0;
score += if (feats.phdev.features.image_cube_array) 1 else 0;
// indirect and conditional rendering
score += if (feats.phdev.features.full_draw_index_uint_32) 1 else 0;
score += if (feats.phdev.features.multi_draw_indirect) 1 else 0;
score += if (feats.phdev.features.draw_indirect_first_instance) 1 else 0;
if (settings.rt_on) {
// ------------------------------------------------------------------------
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceAccelerationStructureFeaturesKHR.html
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#acceleration-structure
score += if (feats.accstr.acceleration_structure) 64 else 0;
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#vkCmdBuildAccelerationStructuresIndirectKHR
//score += if (feats.accstr.accelerationStructureIndirectBuild) 16 else 0;
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#features-accelerationStructureHostCommands
//score += if (feats.accstr.accelerationStructureHostCommands) 16 else 0;
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#features-descriptorBindingAccelerationStructureUpdateAfterBind
//score += if (feats.accstr.descriptorBindingAccelerationStructureUpdateAfterBind) 4 else 0;
// ------------------------------------------------------------------------
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceRayTracingPipelineFeaturesKHR.html
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#ray-tracing
score += if (feats.rtpipe.ray_tracing_pipeline) 64 else 0;
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#vkCmdTraceRaysIndirectKHR
//score += if (feats.rtpipe.rayTracingPipelineTraceRaysIndirect) 16 else 0;
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#ray-traversal-culling-primitive
//score += if (feats.rtpipe.rayTraversalPrimitiveCulling) 16 else 0;
// ------------------------------------------------------------------------
// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceRayQueryFeaturesKHR.html
// https://github.com/KhronosGroup/SPIRV-Registry/blob/master/extensions/KHR/SPV_KHR_ray_query.asciidoc
score += if (feats.rquery.ray_query) 64 else 0;
}
return score;
}
fn propsEval(props: *Props) i32 {
var score = 0;
score += limitsEval(props.limits);
if (settings.rt_on) {
score += accStrEval(props.accstr);
score += rtPipeEval(props.rtpipe);
}
return score;
}
fn limitsEval(lims: *const vk.PhysicalDeviceLimits) i32 {
var score = 0;
score += log2(lims.max_image_dimension_1d);
score += log2(lims.max_image_dimension_2d);
score += log2(lims.max_image_dimension_3d);
score += log2(lims.max_image_dimension_cube);
score += log2(lims.max_image_array_layers);
score += log2(lims.max_uniform_buffer_range);
score += log2(lims.max_storage_buffer_range);
score += log2(lims.max_push_constants_size);
score += log2(lims.max_memory_allocation_count);
score += log2(lims.max_per_stage_descriptor_storage_buffers);
score += log2(lims.max_per_stage_descriptor_sampled_images);
score += log2(lims.max_per_stage_descriptor_storage_images);
score += log2(lims.max_per_stage_resources);
score += log2(lims.max_vertex_input_attributes);
score += log2(lims.max_vertex_input_bindings);
score += log2(lims.max_fragment_input_components);
score += log2(lims.max_compute_shared_memory_size);
score += log2(lims.max_compute_work_group_invocations);
score += log2(lims.max_draw_indirect_count);
score += log2(lims.max_framebuffer_width);
score += log2(lims.max_framebuffer_height);
score += log2(lims.max_color_attachments);
return score;
}
fn accStrEval(accstr: *const vk.PhysicalDeviceAccelerationStructurePropertiesKHR) i32 {
var score = 0;
score += log2(accstr.max_geometry_count);
score += log2(accstr.max_instance_count);
score += log2(accstr.max_primitive_count);
score += log2(accstr.max_per_stage_descriptor_acceleration_structures);
score += log2(accstr.max_descriptor_set_acceleration_structures);
score += log2(accstr.max_descriptor_set_update_after_bind_acceleration_structures);
return score;
}
fn rtPipeEval(rtpipe: *const vk.PhysicalDeviceRaytracingPipelineFeaturesKHR) i32 {
var score = 0;
score += log2(rtpipe.max_ray_recursion_depth);
score += log2(rtpipe.max_ray_dispatch_invocation_count);
score += log2(rtpipe.mayx_ray_hit_attribute_size);
return score;
}
    /// Creates the logical device with one queue per used family and the
    /// extension list this renderer wants enabled.
    /// NOTE(review): multiple WIP issues to confirm — `self.props` (a Props)
    /// is re-assigned below with *feature*-shaped data (s_type
    /// .physical_device_features_2, accstr/rtpipe/rquery feature structs);
    /// presumably `self.feats` was intended. Also: `phdev_features` is not a
    /// Props field, `getOrPutAssumeCapacity` is called on a map with no
    /// reserved capacity, `it` is `const` but `next()` mutates the iterator,
    /// and `queue_infos` is `const` yet assigned in the loop.
    fn createDevice(self: *Self, renderer: *Renderer) !void {
        const surface = renderer.window.surface;
        const queue_support = try queues.QueueSupport.init(surface);
        defer queue_support.deinit();

        // Count how many queues we need from each family.
        const families = std.ArrayHashMap(i32, i32).init(allocator);
        defer families.deinit();
        for (queue_support.families) |family| {
            const result = families.getOrPutAssumeCapacity(family);
            result.value_ptr.* = if (result.found_existing) result.value_ptr.* + 1 else 1;
        }

        const priorities = [_]f32{ 1.0, 1.0, 1.0, 1.0 };
        const queue_infos = [queues.QueueId.count]vk.DeviceQueueCreateInfo{};
        const it = families.iterator();
        {
            var i: usize = 0;
            while (it.next()) |entry| : (i += 1) {
                queue_infos[i] = .{
                    .s_type = .device_queue_create_info,
                    .queue_family_index = entry.key_ptr.*,
                    .queue_count = entry.value_ptr.*,
                    .p_queue_priorities = priorities,
                };
            }
        }

        // Re-state only the features this renderer actually relies on.
        const ph_feats = &self.props.phdev_features;
        self.props = .{
            .phdev = .{
                .s_type = .physical_device_features_2,
                .features = .{
                    .full_draw_index_uint_32 = ph_feats.full_draw_index_uint_32,
                    .sampler_anisotropy = ph_feats.sampler_anisotropy,
                    .texture_compression_bc = ph_feats.texture_compression_bc,
                    .independent_blend = ph_feats.independent_blend,
                    .fill_mode_non_solid = ph_feats.fill_mode_non_solid,
                    .wide_lines = ph_feats.wide_lines,
                    .large_points = ph_feats.large_points,
                    .fragment_stores_and_atomics = ph_feats.fragment_stores_and_atomics,
                    .shader_int_64 = ph_feats.shader_int_64,
                    .shader_int_16 = ph_feats.shader_int_16,
                    .shader_storage_image_extended_formats = ph_feats.shader_storage_image_extended_formats,
                    .shader_uniform_buffer_array_dynamic_indexing = ph_feats.shader_uniform_buffer_array_dynamic_indexing,
                    .shader_storage_buffer_array_dynamic_indexing = ph_feats.shader_storage_buffer_array_dynamic_indexing,
                    .shader_sampled_image_array_dynamic_indexing = ph_feats.shader_sampled_image_array_dynamic_indexing,
                    .shader_storage_image_array_dynamic_indexing = ph_feats.shader_storage_image_array_dynamic_indexing,
                    .image_cube_array = ph_feats.image_cube_array,
                },
            },
        };
        if (settings.rt_on) {
            self.props.accstr = .{
                .s_type = .physical_device_acceleration_structure_features_khr,
                .acceleration_structure = self.props.accstr.acceleration_structure,
                .acceleration_structure_indirect_build = self.props.accstr.acceleration_structure_indirect_build,
                .acceleration_structure_host_commands = self.props.accstr.acceleration_structure_host_commands,
            };
            self.props.rtpipe = .{
                .s_type = .physical_device_ray_tracing_pipeline_features_khr,
                .ray_tracing_pipeline = self.props.rtpipe.ray_tracing_pipeline,
                .ray_tracing_pipeline_trace_rays_indirect = self.props.rtpipe.ray_tracing_pipeline_trace_rays_indirect,
                .ray_traversal_primitive_culling = self.props.rtpipe.ray_traversal_primitive_culling,
            };
            self.props.rquery = .{
                .s_type = .physical_device_ray_query_features_khr,
                .ray_query = self.props.rquery.ray_query,
            };
        }
        // Chain the feature structs for device creation when RT is available.
        if (self.exts.khr_ray_tracing_pipeline) {
            self.props.phdev.p_next = &self.props.accstr;
            self.props.accstr.p_next = &self.props.rtpipe;
            self.props.rtpipe.p_next = &self.props.rquery;
        }

        const ext_arr = self.exts.toArray(allocator);
        defer allocator.free(ext_arr);

        self.dev = try vki.createDevice(self.phdev, &.{
            .flags = .{},
            .p_next = &self.props.phdev,
            .queue_create_info_count = families.count(),
            .p_queue_create_infos = queue_infos,
            .enabled_layer_count = layers.len,
            .p_enabled_layer_names = layers,
            .enabled_extension_count = ext_arr.len,
            .p_enabled_extension_names = ext_arr.ptr,
        }, null);
    }
/// Build the list of device extensions to enable: everything GLFW requires
/// plus the engine's own Extensions.dev list, filtered to what the physical
/// device actually advertises. Caller owns the returned list (deinit it).
/// NOTE(review): the map's value type isn't visible from here; `void` assumed
/// — confirm against the caller that builds `avail_exts`.
fn getExtensions(avail_exts: *std.StringArrayHashMap(void)) !std.ArrayList([*:0]const u8) {
    // ArrayList.init cannot fail, so no `try` here; the element type must
    // also match the declared return type (the original returned a list of
    // `[][*:0]const u8` while appending `[*:0]const u8` items).
    var list = std.ArrayList([*:0]const u8).init(allocator);
    errdefer list.deinit();
    for (try glfw.getRequiredDeviceExtensions()) |ext| {
        // Map keys are slices; convert the sentinel pointer before lookup.
        if (avail_exts.contains(std.mem.span(ext))) {
            // append allocates and can fail; it needs `try`.
            try list.append(ext);
        }
    }
    for (Extensions.dev) |ext| {
        if (avail_exts.contains(std.mem.span(ext))) {
            try list.append(ext);
        }
    }
    return list;
}
};

View File

@ -0,0 +1,120 @@
const glfw = @import("glfw");
const vk = @import("vulkan");
const Renderer = @import("Renderer.zig");
const Instance = @import("instance.zig").Instance;
/// Helpers for choosing an initial window size from the primary monitor.
const Display = struct {
    /// Size of the primary monitor's work area (desktop minus taskbars/docks).
    pub fn getWorkSize() !vk.Extent2D {
        try glfw.init(.{});
        // The original called the nonexistent `glfw.getPoop()`; the primary
        // monitor is what is wanted here.
        const monitor = try glfw.getPrimaryMonitor();
        const work_area = try monitor.getWorkArea();
        if (work_area.width <= 0 or work_area.height <= 0) {
            return error.WorkAreaNotReady;
        }
        return vk.Extent2D{
            .width = work_area.width,
            .height = work_area.height,
        };
    }

    /// Full resolution of the primary monitor: the largest-area video mode.
    pub fn getFullSize() !glfw.Size {
        try glfw.init(.{});
        const monitor = try glfw.getPrimaryMonitor();
        // Out-parameter must be mutable (was `const`).
        var mode_count: u32 = undefined;
        const modes = try monitor.getVideoModes(monitor, &mode_count);
        if (mode_count <= 0) {
            return error.NoModes;
        }
        // Track the best mode by value: the original kept a pointer to the
        // loop capture, which escapes a per-iteration temporary, and then
        // compared a (non-optional, uninitialized) u32 against null.
        var best: ?glfw.VideoMode = null;
        var best_area: u32 = 0;
        for (modes) |mode| {
            const area = mode.width * mode.height;
            if (area > best_area) {
                best_area = area;
                best = mode;
            }
        }
        const chosen = best orelse return error.NoModes;
        return glfw.Size{
            .width = chosen.width,
            .height = chosen.height,
        };
    }

    /// Window size for the requested mode.
    pub fn getSize(fullscreen: bool) !glfw.Size {
        return if (fullscreen) try getFullSize() else try getWorkSize();
    }
};
/// GLFW window plus its Vulkan surface.
const Window = struct {
    const Self = @This();

    fullscreen: bool,
    handle: glfw.Window,
    size: glfw.Size,
    // Was assigned in init() but never declared as a field.
    surface: vk.SurfaceKHR,

    /// Create the window and its surface. Returns the initialized Window
    /// (the original declared `!void` yet tried to `return self`, and
    /// mutated a `const self`).
    pub fn init(instance: *Instance, title: [*:0]const u8, width: u32, height: u32, fullscreen: bool) !Self {
        try glfw.init(.{});
        var self: Self = undefined;
        self.fullscreen = fullscreen;
        // Only claim the monitor (exclusive fullscreen) when requested; the
        // original passed the primary monitor unconditionally, which makes
        // every window fullscreen.
        const monitor = if (fullscreen) try glfw.getPrimaryMonitor() else null;
        self.handle = try glfw.Window.create(width, height, title, monitor, null, .{
            .client_api = .no_api,
            .srgb_capable = true,
            .auto_iconify = !fullscreen,
            .maximized = !fullscreen,
        });
        errdefer self.handle.destroy();
        // TODO: input: register window
        self.size = try self.handle.getFramebufferSize();
        // NOTE(review): most glfw bindings also take the window (and an
        // allocation callback / out-param) here — confirm this signature.
        self.surface = try glfw.createWindowSurface(instance.vkInst);
        return self;
    }

    pub fn deinit(self: *Self, instance: *Instance) void {
        // vk handles are not booleans; compare against the null handle.
        if (self.surface != .null_handle) {
            // NOTE(review): the original used an undeclared `self.vki`; the
            // instance owns the dispatch table — confirm field name.
            instance.vki.destroySurfaceKHR(instance.vkInst, self.surface, null);
        }
        self.handle.destroy();
    }

    /// Refresh the cached framebuffer size; true when it changed.
    pub fn updateSize(self: *Self) !bool {
        const prev_size = self.size;
        self.size = try self.handle.getFramebufferSize();
        return prev_size.width != self.size.width or prev_size.height != self.size.height;
    }

    pub fn getPosition(self: *Self) !glfw.Pos {
        return try self.handle.getPos();
    }

    // setPos produces no position; returning `!glfw.Pos` (as the original
    // declared) cannot compile.
    pub fn setPosition(self: *Self, pos: glfw.Pos) !void {
        return try self.handle.setPos(pos);
    }

    pub fn isOpen(self: *Self) !bool {
        return try self.handle.shouldClose();
    }

    pub fn poll() !void {
        return try glfw.pollEvents();
    }
};

View File

@ -1,31 +0,0 @@
const settings = @import("settings");
// Device extensions always requested.
// NOTE(review): real Vulkan extension names carry a "VK_" prefix (e.g.
// "VK_KHR_swapchain"); confirm a prefix is prepended downstream before
// these strings reach vkCreateDevice.
const dev_standard = [_][*:0]const u8{
    "KHR_swapchain",
    "EXT_memory_budget",
    "EXT_hdr_metadata",
    "KHR_shader_float16_int8",
    "KHR_16bit_storage",
    "KHR_push_descriptor",
    "EXT_memory_priority",
    "KHR_bind_memory2",
    "KHR_shader_float_controls",
    "KHR_spirv_1_4",
    "EXT_conditional_rendering",
    "KHR_draw_indirect_count",
};

// Ray-tracing extensions, compiled in only when ray tracing is enabled.
// `else {}` (an empty block) is not an empty array; both branches must be
// arrays for the `++` below to typecheck.
const dev_rt = if (settings.rt_on) [_][*:0]const u8{
    "KHR_acceleration_structure",
    "KHR_ray_tracing_pipeline",
    "KHR_ray_query",
    "KHR_deferred_host_operations",
} else [_][*:0]const u8{};

pub const dev = dev_standard ++ dev_rt;

// Instance extensions. `if (...) "name" else void` is invalid — `void` is a
// type, not an array element — so append conditionally via concatenation.
pub const inst = [_][*:0]const u8{
    "KHR_get_physical_device_properties2",
    "EXT_swapchain_colorspace",
} ++ (if (settings.messenger_on) [_][*:0]const u8{"EXT_debug_utils"} else [_][*:0]const u8{});

View File

@ -2,15 +2,34 @@ const std = @import("std");
const vk = @import("vulkan");
const glfw = @import("glfw");
const vkb = @import("base.zig").BaseDispatch;
const settings = @import("settings.zig");
const extensions = @import("extensions.zig");
const Extensions = @import("Extensions.zig");
const layers = @import("layers.zig").layers;
const DebugMessenger = @import("debug.zig").DebugMessenger;
const DebugMessenger = @import("DebugMessenger.zig");
// TODO: replace with an engine-provided temporary allocator.
// `std.heap.GeneralPurposeAllocator` is a type constructor; it must be
// instantiated before an Allocator interface can be taken from it (the
// original aliased the bare function, which is not an allocator at all).
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
// Instance-level Vulkan entry points loaded by the vulkan-zig wrapper; each
// `= true` selects one function pointer to be loaded into the dispatch table.
const InstanceDispatch = vk.InstanceWrapper(.{
    .destroyInstance = true,
    .createDevice = true,
    .destroySurfaceKHR = true,
    .enumeratePhysicalDevices = true,
    .getPhysicalDeviceProperties = true,
    .enumerateDeviceExtensionProperties = true,
    .getPhysicalDeviceSurfaceFormatsKHR = true,
    .getPhysicalDeviceSurfacePresentModesKHR = true,
    .getPhysicalDeviceSurfaceCapabilitiesKHR = true,
    .getPhysicalDeviceQueueFamilyProperties = true,
    .getPhysicalDeviceSurfaceSupportKHR = true,
    .getPhysicalDeviceMemoryProperties = true,
    .getDeviceProcAddr = true,
});

// NOTE(review): this aliases the wrapper *type*, not a loaded instance of it.
// Calls such as `vki.destroySurfaceKHR(...)` need a constructed dispatch
// value (InstanceDispatch.load(...)); confirm how callers obtain one.
const vki = InstanceDispatch;
pub const Instance = struct {
const Self = @This();
@ -39,16 +58,17 @@ pub const Instance = struct {
.api_version = vk.API_VERSION_1_2,
};
self.vkInst = try self.vkb.createInstance(&.{
self.vkInst = try vkb.createInstance(&.{
.flags = .{},
.p_application_info = &app_info,
.enabled_layer_count = @intCast(u32, self.layers.len),
.pp_enabled_layer_names = self.layers.ptr,
.enabled_extension_count = @intCast(u32, self.extensions.len),
.pp_enabled_extension_names = self.extensions.ptr,
.pp_enabled_extension_names = self.extensions.items,
}, null);
errdefer self.vki.destroyInstance(self.vkInst, null);
self.messenger = DebugMessenger.init();
self.messenger = DebugMessenger.init(&self);
return self;
}
@ -56,9 +76,9 @@ pub const Instance = struct {
pub fn deinit(self: Self) void {
self.vki.destroyInstance(self.vkInst, null);
// TODO: remove when using temp allocator
self.layers.deinit();
self.extensions.deinit();
self.messenger.deinit(&self);
}
/// list all available layers
@ -124,7 +144,9 @@ pub const Instance = struct {
}
}
for (extensions.inst) |ext| {
const instExts = Extensions.toList(Extensions.Instance);
defer instExts.deinit();
for (instExts) |ext| {
if (avail_exts.contains(ext)) {
list.append(ext);
}

View File

@ -1,6 +1,6 @@
const settings = @import("settings.zig");
pub const layers = [_][*:0]u8{
// Validation layers to enable, gated by build settings.
// `if (cond) "name" else void` is invalid — `void` is a type, not an array
// element — so build the list by concatenating optional one-element arrays;
// the list's length then matches the enabled settings exactly.
// NOTE(review): real layer names carry a "VK_LAYER_" prefix (e.g.
// "VK_LAYER_KHRONOS_validation"); confirm a prefix is added downstream.
pub const layers =
    (if (settings.khronos_layer_on) [_][*:0]const u8{"KHRONOS_validation"} else [_][*:0]const u8{}) ++
    (if (settings.assist_layer_on) [_][*:0]const u8{"LUNARG_assistant_layer"} else [_][*:0]const u8{});

View File

@ -0,0 +1,315 @@
const std = @import("std");
const assert = @import("std").debug.assert;
const vk = @import("vulkan");
const dev = @import("device.zig");
const vkd = dev.DeviceDispatch;
const Renderer = @import("Renderer.zig");
const Command = @import("Command.zig");
// TODO memory
// TODO memory: thread an allocator through from the engine instead.
// The GPA must be `var`: `.allocator()` takes a mutable pointer to the
// state it hands out, so a `const` instance cannot provide one.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
// vulkan-zig models Vulkan flag sets as packed structs of bools, so flag
// values are combined with struct literals rather than bitwise `|` on enum
// literals (which does not compile). Each constant below is the union of
// stages/accesses a queue with that capability may use in barriers.

const present_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
};

// NOTE(review): the original named a nonexistent `vk.PipelineAccessFlags`;
// access masks are vk.AccessFlags.
const present_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
};

const graphics_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .all_graphics_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
    .draw_indirect_bit = true,
    .vertex_input_bit = true,
    .vertex_shader_bit = true,
    .tessellation_control_shader_bit = true,
    .tessellation_evaluation_shader_bit = true,
    .geometry_shader_bit = true,
    .fragment_shader_bit = true,
    .early_fragment_tests_bit = true,
    .late_fragment_tests_bit = true,
    .color_attachment_output_bit = true,
    .conditional_rendering_bit_ext = true,
    .task_shader_bit_nv = true,
    .mesh_shader_bit_nv = true,
};

const graphics_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
    .indirect_command_read_bit = true,
    .index_read_bit = true,
    .vertex_attribute_read_bit = true,
    .uniform_read_bit = true,
    .input_attachment_read_bit = true,
    .shader_read_bit = true,
    .shader_write_bit = true,
    .color_attachment_read_bit = true,
    .color_attachment_write_bit = true,
    .conditional_rendering_read_bit_ext = true,
    .acceleration_structure_read_bit_khr = true,
    .acceleration_structure_write_bit_khr = true,
};

const compute_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
    .compute_shader_bit = true,
    .acceleration_structure_build_bit_khr = true,
    .ray_tracing_shader_bit_khr = true,
};

const compute_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
    .indirect_command_read_bit = true,
    .uniform_read_bit = true,
    .shader_read_bit = true,
    .shader_write_bit = true,
    .acceleration_structure_read_bit_khr = true,
    .acceleration_structure_write_bit_khr = true,
};

const transfer_stages = vk.PipelineStageFlags{
    .all_commands_bit = true,
    .top_of_pipe_bit = true,
    .bottom_of_pipe_bit = true,
    .transfer_bit = true,
    .host_bit = true,
};

const transfer_access = vk.AccessFlags{
    .memory_read_bit = true,
    .memory_write_bit = true,
    .host_read_bit = true,
    .host_write_bit = true,
    .transfer_read_bit = true,
    .transfer_write_bit = true,
};
// Logical queue roles used by the renderer. The backing integer is u4 so
// the id fits the packed bitfields in Queue. Ordinal values index the
// module-level queue table, so the declaration order is load-bearing.
pub const QueueId = enum(u4) {
    graphics,
    compute,
    transfer,
    present,

    // Number of queue roles (= number of enum fields).
    pub const count = @typeInfo(@This()).Enum.fields.len;
};
// Queue table, one slot per QueueId. Must be `var`: init() writes into it
// (the original `const` table could never be initialized).
var queues: [QueueId.count]Queue = undefined;

/// Select queue families for the device/surface pair and initialize one
/// Queue per role. Must run after the device and surface exist.
pub fn init(device: *dev.Device, window: *dev.Window) !void {
    // vk handles are not booleans; compare against the null handle.
    assert(device.phdev != .null_handle);
    assert(window.surface != .null_handle);
    var support = try QueueSupport.init(device, window);
    defer support.deinit();
    // Capture by pointer: a by-value capture would initialize a throwaway
    // copy of each slot instead of the table entry itself.
    for (queues) |*queue, i| {
        try queue.init(device.*, &support, @intCast(i32, i));
    }
}

/// Tear down all queues. Waits for the device to go idle first, since
/// command pools cannot be destroyed while their buffers are in flight.
pub fn deinit(device: dev.Device) void {
    device.waitIdle();
    for (queues) |*queue| {
        queue.deinit();
    }
}

/// Fetch the queue serving the given role.
pub fn get(id: QueueId) *Queue {
    // Enums do not index arrays directly; convert to the backing integer.
    return &queues[@enumToInt(id)];
}
/// Result of queue-family selection: which family serves each QueueId role,
/// and which queue index within that family to request.
const QueueSupport = struct {
    const Self = @This();

    // Chosen family per role; null when no family qualifies.
    families: [QueueId.count]?u32,
    // Queue index within the chosen family (nth queue handed out from it).
    indices: [QueueId.count]u32,
    properties: []vk.QueueFamilyProperties,

    // The original declared `!void` yet returned `self`; it must return Self.
    pub fn init(device: *const dev.Device, window: *const dev.Window) !Self {
        var self: Self = undefined;

        // Out-parameter must be mutable (was `const`).
        // NOTE(review): queue-family enumeration is an instance-level call
        // (getPhysicalDeviceQueueFamilyProperties); confirm which dispatch
        // table owns it in this codebase.
        var count: u32 = 0;
        vkd.getPhysicalDeviceQueueFamilyProperties(device.phdev, &count, null);
        self.properties = try allocator.alloc(vk.QueueFamilyProperties, count);
        errdefer allocator.free(self.properties);
        vkd.getPhysicalDeviceQueueFamilyProperties(device.phdev, &count, self.properties.ptr);

        // Enums do not index arrays directly; convert to the backing integer.
        self.families[@enumToInt(QueueId.graphics)] = selectGfxFamily(self.properties);
        self.families[@enumToInt(QueueId.compute)] = selectCompFamily(self.properties);
        self.families[@enumToInt(QueueId.transfer)] = selectXferFamily(self.properties);
        self.families[@enumToInt(QueueId.present)] = try selectPresFamily(device.phdev, window.surface, self.properties);

        // Hand out distinct queue indices when several roles share a family:
        // the nth role mapped to a family gets queue index n within it.
        const choice_counts = try allocator.alloc(u32, count);
        defer allocator.free(choice_counts);
        std.mem.set(u32, choice_counts, 0);
        for (self.families) |family_opt, qi| {
            const family = family_opt orelse continue;
            self.indices[qi] = choice_counts[family];
            choice_counts[family] += 1;
        }
        return self;
    }

    pub fn deinit(self: *Self) void {
        allocator.free(self.properties);
    }

    // Family selection: prefer the qualifying family with the most *extra*
    // capabilities. Scores start at 1 so a capability-exact family can still
    // win over "none found" — the original started at 0 and `> 0` could
    // therefore never select a family with no extras.

    fn selectGfxFamily(families: []const vk.QueueFamilyProperties) ?u32 {
        var index: ?u32 = null;
        var best: u32 = 0;
        for (families) |family, i| {
            if (family.queue_count == 0) continue;
            // Flags are packed structs of bools in vulkan-zig, not bitmasks.
            if (!family.queue_flags.graphics_bit) continue;
            var score: u32 = 1;
            if (family.queue_flags.compute_bit) score += 1;
            if (family.queue_flags.transfer_bit) score += 1;
            if (score > best) {
                best = score;
                index = @intCast(u32, i);
            }
        }
        return index;
    }

    fn selectCompFamily(families: []const vk.QueueFamilyProperties) ?u32 {
        var index: ?u32 = null;
        var best: u32 = 0;
        for (families) |family, i| {
            if (family.queue_count == 0) continue;
            if (!family.queue_flags.compute_bit) continue;
            var score: u32 = 1;
            if (family.queue_flags.graphics_bit) score += 1;
            if (family.queue_flags.transfer_bit) score += 1;
            if (score > best) {
                best = score;
                index = @intCast(u32, i);
            }
        }
        return index;
    }

    fn selectXferFamily(families: []const vk.QueueFamilyProperties) ?u32 {
        var index: ?u32 = null;
        var best: u32 = 0;
        for (families) |family, i| {
            if (family.queue_count == 0) continue;
            if (!family.queue_flags.transfer_bit) continue;
            var score: u32 = 1;
            if (family.queue_flags.graphics_bit) score += 1;
            if (family.queue_flags.compute_bit) score += 1;
            if (score > best) {
                best = score;
                index = @intCast(u32, i);
            }
        }
        return index;
    }

    fn selectPresFamily(phdev: vk.PhysicalDevice, surf: vk.SurfaceKHR, families: []const vk.QueueFamilyProperties) !?u32 {
        var index: ?u32 = null;
        var best: u32 = 0;
        for (families) |family, i| {
            const presentable = try vkd.getPhysicalDeviceSurfaceSupportKHR(phdev, @intCast(u32, i), surf);
            // NOTE(review): this returns vk.Bool32, not bool — confirm the
            // comparison against vk.TRUE matches the binding in use.
            if (presentable == vk.TRUE) {
                var score: u32 = 1;
                if (family.queue_flags.graphics_bit) score += 1;
                if (family.queue_flags.compute_bit) score += 1;
                if (family.queue_flags.transfer_bit) score += 1;
                if (score > best) {
                    best = score;
                    index = @intCast(u32, i);
                }
            }
        }
        return index;
    }
};
// One logical queue: its VkQueue handle, family/index, capability-derived
// barrier masks, and a ring of per-frame command buffers.
// NOTE(review): `packed struct` cannot hold arrays and dispatchable handles
// like these fields; this almost certainly needs to be a plain struct.
const Queue = packed struct {
    handle: vk.Queue,
    family: i32,
    index: i32,
    // Union of accesses/stages this queue's capabilities allow; consumed
    // when building barriers.
    access_mask: vk.AccessFlags,
    stage_mask: vk.PipelineStageFlags,
    queueId: u4,
    gfx: bool, // supports graphics
    comp: bool, // supports compute
    xfer: bool, // supports transfer
    pres: bool, // can present to the window surface
    cmd_pool: vk.CommandPool,
    cmds: [Renderer.cmds_per_queue]vk.CommandBuffer,
    cmd_fences: [Renderer.cmds_per_queue]vk.Fence,
    cmd_ids: [Renderer.cmds_per_queue]u32,
    head: u32,
    tail: u32,

    const Self = @This();

    // Resolve this queue's family/index from `support`, fetch the VkQueue
    // handle, and derive the capability masks from the family's flags.
    pub fn init(self: *Self, device: dev.Device, support: *QueueSupport, id: i32) !void {
        // NOTE(review): QueueSupport declares `families`/`indices`, not
        // `family`/`index` — these lookups will not compile as written.
        const family = support.family[id];
        const index = support.index[id];
        assert(family >= 0);
        assert(index >= 0);
        // NOTE(review): vulkan-zig's getDeviceQueue does not return an error
        // union; `try` is likely wrong here — confirm the dispatch wrapper.
        const handle = try vkd.getDeviceQueue(device, family, index);
        self.family = family;
        self.index = index;
        self.handle = handle;
        const pres_family = support.family[QueueId.present];
        // NOTE(review): `support.properties` is a slice; this needs an index
        // (properties[family].queue_flags).
        const queue_flags = support.properties.queue_flags;
        // NOTE(review): vulkan-zig flag sets are packed structs of bools, so
        // `&` with an enum literal and `|=` do not apply; also stage_mask and
        // access_mask are never zero-initialized before being OR-ed into.
        if (queue_flags & .graphics_bit) {
            self.gfx = true;
            self.stage_mask |= graphics_stages;
            self.access_mask |= graphics_access;
        }
        if (queue_flags & .compute_bit) {
            self.comp = true;
            self.stage_mask |= compute_stages;
            self.access_mask |= compute_access;
        }
        if (queue_flags & .transfer_bit) {
            self.xfer = true;
            self.stage_mask |= transfer_stages;
            self.access_mask |= transfer_access;
        }
        if (family == pres_family) {
            self.pres = true;
            self.stage_mask |= present_stages;
            self.access_mask |= present_access;
        }
        assert(self.stage_mask != 0);
        assert(self.access_mask != 0);
        // Allocate the per-queue command pool/buffers/fences.
        try Command.init(self, id);
    }

    pub fn deinit(self: *Self) void {
        Command.deinit(self);
    }
};

View File

@ -1,165 +0,0 @@
const std = @import("std");
const Instance = @import("instance.zig").Instance;
const Window = @import("window.zig").Window;
const Device = @import("device.zig").Device;
const Memory = @import("memory.zig").Memory;
const Framebuffer = @import("framebuffer.zig").Framebuffer;
const Swapchain = @import("swapchain.zig").Swapchain;
const Context = @import("context.zig").Context;
const MainPass = @import("main_pass.zig").MainPass;
const Display = @import("display.zig").Display;
const Command = @import("command.zig").Command;
// Module-level singleton renderer instance.
var g_renderer: VulkanRenderer = undefined;

/// Top-level Vulkan renderer: owns the instance, window, device and all
/// per-frame subsystems, and drives the frame loop in update().
pub const VulkanRenderer = struct {
    instance: Instance,
    window: Window,
    device: Device,
    memory: Memory,
    framebuffer: Framebuffer,
    swapchain: Swapchain,
    context: Context,
    // sampler: Sampler, TODO
    // texTable: TexTable, TODO
    // bindings: Bindings, TODO
    // targets: Targets, TODO
    // meshSys: MeshSys, TODO
    // imSys: ImSys, TODO
    mainPass: MainPass,

    const Self = @This();

    // Initialization order is load-bearing: each subsystem depends on the
    // ones before it (instance -> window/surface -> device -> memory -> ...).
    // NOTE(review): `const self = Self{};` cannot work — Self has no field
    // defaults, and a `const` cannot be mutated by the init calls below;
    // this needs `var self: Self = undefined;` (or per-field initialization).
    pub fn init() !Self {
        const self = Self{};
        errdefer std.debug.print("failed to init VulkanRenderer", .{});
        errdefer self.deinit();

        try self.instance.init();
        try windowInit(&self.window);
        try self.device.init();
        try self.memory.init();
        try self.framebuffer.init();
        try self.swapchain.init(null);
        try self.context.init();
        // try self.sampler.init();
        // try self.texTable.init();
        // try self.bindings.init();
        // try self.targets.init();
        // try self.meshSys.init();
        // try self.imSys.init();
        try self.mainPass.init();
        return self;
    }

    // Per-frame driver: acquire -> update systems -> setup -> execute ->
    // present -> background work.
    // NOTE(review): declared to return bool but no value is ever returned,
    // and the subsystem calls below all discard error unions without `try`.
    pub fn update(self: Self) bool {
        // TODO profiling
        // base system update
        {
            self.swapchain.acquireSync();
            self.swapchain.acquireImage();
            self.memory.update();
        }

        // system update
        {
            // TODO: sampler/meshsys/imsys update
        }

        // setup phase
        {
            self.mainPass.setup();
            // TODO textable update
            Command.flush();
            // TODO bindings update
        }

        // execute phase
        self.mainPass.execute();

        // present phase
        self.swapchain.submit(Command.get());

        // background work
        {
            // TODO upload lightmaps, imsys clear
            Command.flush();
        }
    }

    // Teardown runs in reverse dependency order after draining the GPU.
    pub fn deinit(self: Self) void {
        self.device.waitIdle();

        // TODO: delete lightmap pack
        self.mainPass.deinit();
        // self.imSys.deinit();
        // self.meshSys.deinit();
        // self.targets.deinit();
        // self.bindings.deinit();
        // self.texTable.deinit();
        // self.sampler.deinit();
        // clear other passes here.
        self.memory.finalize();
        self.context.deinit();
        self.swapchain.deinit();
        self.framebuffer.deinit();
        self.window.deinit();
        self.memory.deinit();
        self.device.deinit();
        self.instance.deinit();
    }

    fn windowInit(window: *Window) !void {
        // TODO: convar cv_fullscreen
        const fullscreen = false;
        const extents = try Display.getSize(fullscreen);
        try window.init("efemra", extents, fullscreen);
        // TODO: convar r_width/r_height set
        // TODO: UISys_Init
    }

    fn windowDeinit(window: *Window) void {
        // TODO: uisys_shutdown
        // TODO: convar r_width/r_height unset
        window.deinit();
    }

    // Recreate the swapchain (and eventually the window) when the window
    // changes size or fullscreen state.
    fn windowUpdate(self: Self) !void {
        if (!self.window.isOpen()) {
            return error.WindowNotOpen;
        }

        // TODO: convar fullscreen change check
        // NOTE(review): `if (0)` is not valid Zig — the condition must be a
        // bool; also `self.window.init()` below is called with no arguments,
        // which does not match Window.init's signature.
        if (0) {
            self.device.waitIdle();
            // TODO: UI pass del
            self.swapchain.deinit();
            self.window.deinit();
            self.window.init();
            self.swapchain.init(null);
            // TODO: UI pass new
        }

        if (self.window.updateSize()) {
            try self.swapchain.recreate();
            // TODO set r_width/r_height convar
        }

        // NOTE(review): truth-testing a non-optional handle; compare against
        // .null_handle instead.
        if (!self.swapchain.handle) {
            return error.NoSwapchainHandle;
        }

        self.targets.maybeRecreate();
    }
};

View File

@ -7,6 +7,7 @@ pub const khronos_layer_on = true;
pub const assist_layer_on = true;
pub const messenger_on = true;
pub const rt_on = false;
pub const hdr_on = true;
pub fn OnlyIf(comptime setting: bool, t: anytype) t {
return if (setting) t else void;

View File

@ -0,0 +1,75 @@
const assert = @import("std").debug.assert;
const vk = @import("vulkan");
const vkd = @import("device.zig").DeviceDispatch;
const Device = @import("device.zig").Device;
const Renderer = @import("Renderer.zig");
/// Thin wrapper over VkSemaphore. Caller must deinit() with the same device.
pub const Semaphore = struct {
    const Self = @This();

    handle: vk.Semaphore,

    pub fn init(device: *Device) !Self {
        // vk handles are not booleans; compare against the null handle.
        assert(device.dev != .null_handle);
        // Pass the create info by address (`&.{...}`), matching how the other
        // create* calls in this codebase invoke the dispatch wrappers.
        const handle = try vkd.createSemaphore(device.dev, &.{
            .s_type = .semaphore_create_info,
            .flags = .{},
        }, null);
        return Self{
            .handle = handle,
        };
    }

    pub fn deinit(self: Self, device: *Device) void {
        vkd.destroySemaphore(device.dev, self.handle, null);
    }
};
/// Thin wrapper over VkFence with a polled wait loop.
pub const Fence = struct {
    const Self = @This();

    pub const State = enum(i32) {
        // Enum tag values must be comptime integers, not vk.Result values;
        // derive them from the result codes explicitly.
        signaled = @enumToInt(vk.Result.success),
        unsignaled = @enumToInt(vk.Result.not_ready),
    };

    handle: vk.Fence,

    pub fn init(device: *Device, signaled: bool) !Self {
        // vk handles are not booleans; compare against the null handle.
        assert(device.dev != .null_handle);
        const handle = try vkd.createFence(device.dev, &.{
            .s_type = .fence_create_info,
            // `void` is a type, not a value; the "off" state is an empty
            // flag set, not void.
            .flags = if (signaled) vk.FenceCreateFlags{ .signaled_bit = true } else .{},
        }, null);
        return Self{
            .handle = handle,
        };
    }

    pub fn deinit(self: *Self, device: *Device) void {
        vkd.destroyFence(device.dev, self.handle, null);
    }

    pub fn reset(self: *Self, device: *Device) !void {
        assert(device.dev != .null_handle);
        try vkd.resetFences(device.dev, 1, &self.handle);
    }

    /// Block until the fence signals, polling its status between waits.
    pub fn wait(self: *Self, device: *Device) !void {
        assert(device.dev != .null_handle);
        // `-1` does not coerce to u64; "no timeout" is the all-ones value.
        const timeout: u64 = ~@as(u64, 0);
        // stat() returns an error union and requires the device argument;
        // the original called `self.stat()` bare and without `try`.
        while ((try self.stat(device)) != .signaled) {
            // wait_all is a vk.Bool32; with a single fence TRUE and FALSE
            // are equivalent, so pass the well-typed TRUE.
            _ = try vkd.waitForFences(device.dev, 1, &self.handle, vk.TRUE, timeout);
        }
    }

    pub fn stat(self: *Self, device: *Device) !State {
        assert(device.dev != .null_handle);
        // getFenceStatus yields a vk.Result; map it onto State explicitly
        // rather than returning the raw result where State is declared.
        const result = try vkd.getFenceStatus(device.dev, self.handle);
        return switch (result) {
            .success => State.signaled,
            else => State.unsignaled,
        };
    }
};