forked from vv/efemra
1
0
Fork 0

remove the old shit

This commit is contained in:
Vivianne 2022-07-27 03:06:51 -07:00
parent 457c8ad742
commit 50b68e34b6
4 changed files with 0 additions and 1192 deletions

View File

@ -1,354 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
const glfw = @import("glfw");
const Allocator = std.mem.Allocator;
// Device extensions this renderer cannot run without (just the swapchain).
const required_device_extensions = [_][*:0]const u8{vk.extension_info.khr_swapchain.name};

// vulkan-zig generates dispatch tables at comptime from these flag sets:
// only the entry points marked `true` are loaded and callable.

// Loader-level entry points (resolved before an instance exists).
const BaseDispatch = vk.BaseWrapper(.{
    .createInstance = true,
});

// Instance-level entry points: physical-device enumeration/queries, surface
// queries, device creation, and the bootstrap for the device dispatch below.
const InstanceDispatch = vk.InstanceWrapper(.{
    .destroyInstance = true,
    .createDevice = true,
    .destroySurfaceKHR = true,
    .enumeratePhysicalDevices = true,
    .getPhysicalDeviceProperties = true,
    .enumerateDeviceExtensionProperties = true,
    .getPhysicalDeviceSurfaceFormatsKHR = true,
    .getPhysicalDeviceSurfacePresentModesKHR = true,
    .getPhysicalDeviceSurfaceCapabilitiesKHR = true,
    .getPhysicalDeviceQueueFamilyProperties = true,
    .getPhysicalDeviceSurfaceSupportKHR = true,
    .getPhysicalDeviceMemoryProperties = true,
    .getDeviceProcAddr = true,
});

// Device-level entry points used by the swapchain, pipeline, and command
// recording code in this project.
const DeviceDispatch = vk.DeviceWrapper(.{
    .destroyDevice = true,
    .getDeviceQueue = true,
    .createSemaphore = true,
    .createFence = true,
    .createImageView = true,
    .destroyImageView = true,
    .destroySemaphore = true,
    .destroyFence = true,
    .getSwapchainImagesKHR = true,
    .createSwapchainKHR = true,
    .destroySwapchainKHR = true,
    .acquireNextImageKHR = true,
    .deviceWaitIdle = true,
    .waitForFences = true,
    .resetFences = true,
    .queueSubmit = true,
    .queuePresentKHR = true,
    .createCommandPool = true,
    .destroyCommandPool = true,
    .allocateCommandBuffers = true,
    .freeCommandBuffers = true,
    .queueWaitIdle = true,
    .createShaderModule = true,
    .destroyShaderModule = true,
    .createPipelineLayout = true,
    .destroyPipelineLayout = true,
    .createRenderPass = true,
    .destroyRenderPass = true,
    .createGraphicsPipelines = true,
    .destroyPipeline = true,
    .createFramebuffer = true,
    .destroyFramebuffer = true,
    .beginCommandBuffer = true,
    .endCommandBuffer = true,
    .allocateMemory = true,
    .freeMemory = true,
    .createBuffer = true,
    .destroyBuffer = true,
    .getBufferMemoryRequirements = true,
    .mapMemory = true,
    .unmapMemory = true,
    .bindBufferMemory = true,
    .cmdBeginRenderPass = true,
    .cmdEndRenderPass = true,
    .cmdBindPipeline = true,
    .cmdDraw = true,
    .cmdSetViewport = true,
    .cmdSetScissor = true,
    .cmdBindVertexBuffers = true,
    .cmdCopyBuffer = true,
});
/// Owns the core Vulkan objects shared by the whole renderer: instance,
/// surface, chosen physical device, logical device, and the two queues.
/// `deinit` must be called to release them; destruction order matters
/// (device before surface before instance).
pub const GraphicsContext = struct {
    vkb: BaseDispatch,
    vki: InstanceDispatch,
    vkd: DeviceDispatch,

    instance: vk.Instance,
    surface: vk.SurfaceKHR,
    pdev: vk.PhysicalDevice,
    props: vk.PhysicalDeviceProperties,
    mem_props: vk.PhysicalDeviceMemoryProperties,

    dev: vk.Device,
    graphics_queue: Queue,
    present_queue: Queue,

    /// Bootstraps the whole Vulkan stack: loads the base dispatch through
    /// GLFW's loader, creates an instance with GLFW's required extensions,
    /// creates the window surface, picks a physical device, and creates the
    /// logical device plus graphics/present queues.
    /// NOTE(review): if `InstanceDispatch.load` or `DeviceDispatch.load`
    /// itself fails, the instance/device leak — the destroy function only
    /// becomes available from the load that failed. Same as upstream; accepted.
    pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *glfw.Window) !GraphicsContext {
        var self: GraphicsContext = undefined;
        // GLFW's loader doubles as vkGetInstanceProcAddr for vulkan-zig.
        const vk_proc = @ptrCast(fn (instance: vk.Instance, procname: [*:0]const u8) callconv(.C) vk.PfnVoidFunction, glfw.getInstanceProcAddress);
        self.vkb = try BaseDispatch.load(vk_proc);
        const glfw_exts = try glfw.getRequiredInstanceExtensions();
        const app_info = vk.ApplicationInfo{
            .p_application_name = app_name,
            .application_version = vk.makeApiVersion(0, 0, 0, 0),
            .p_engine_name = app_name,
            .engine_version = vk.makeApiVersion(0, 0, 0, 0),
            .api_version = vk.API_VERSION_1_2,
        };
        self.instance = try self.vkb.createInstance(&.{
            .flags = .{},
            .p_application_info = &app_info,
            .enabled_layer_count = 0,
            .pp_enabled_layer_names = undefined,
            .enabled_extension_count = @intCast(u32, glfw_exts.len),
            .pp_enabled_extension_names = glfw_exts.ptr,
        }, null);
        self.vki = try InstanceDispatch.load(self.instance, vk_proc);
        errdefer self.vki.destroyInstance(self.instance, null);
        self.surface = try createSurface(self.instance, window);
        errdefer self.vki.destroySurfaceKHR(self.instance, self.surface, null);
        const candidate = try pickPhysicalDevice(self.vki, self.instance, allocator, self.surface);
        self.pdev = candidate.pdev;
        self.props = candidate.props;
        self.dev = try initializeCandidate(self.vki, candidate);
        self.vkd = try DeviceDispatch.load(self.dev, self.vki.dispatch.vkGetDeviceProcAddr);
        errdefer self.vkd.destroyDevice(self.dev, null);
        self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family);
        self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.present_family);
        self.mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev);
        return self;
    }

    /// Destroys device, surface, and instance — reverse of creation order.
    pub fn deinit(self: GraphicsContext) void {
        self.vkd.destroyDevice(self.dev, null);
        self.vki.destroySurfaceKHR(self.instance, self.surface, null);
        self.vki.destroyInstance(self.instance, null);
    }

    /// Returns the device name as a slice of the fixed-size, NUL-terminated
    /// array in the cached properties. Borrowed from `self`; do not free.
    pub fn deviceName(self: GraphicsContext) []const u8 {
        const len = std.mem.indexOfScalar(u8, &self.props.device_name, 0).?;
        return self.props.device_name[0..len];
    }

    /// Finds the index of a memory type that is allowed by `memory_type_bits`
    /// (from VkMemoryRequirements) and has at least the requested `flags`.
    pub fn findMemoryTypeIndex(self: GraphicsContext, memory_type_bits: u32, flags: vk.MemoryPropertyFlags) !u32 {
        // wow wtf is this stuff
        for (self.mem_props.memory_types[0..self.mem_props.memory_type_count]) |mem_type, i| {
            // u5 truncate assumes at most 32 memory types — presumably fine
            // for Vulkan's fixed-size table, but confirm against the spec.
            if (memory_type_bits & (@as(u32, 1) << @truncate(u5, i)) != 0 and mem_type.property_flags.contains(flags)) {
                return @truncate(u32, i);
            }
        }
        return error.NoSuitableMemoryType;
    }

    /// Allocates device memory satisfying `requirements` with the given
    /// property flags. Caller frees with `vkd.freeMemory`.
    pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory {
        return try self.vkd.allocateMemory(self.dev, &.{
            .allocation_size = requirements.size,
            .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags),
        }, null);
    }
};
/// A device queue handle paired with the family index it was created from.
pub const Queue = struct {
    handle: vk.Queue,
    family: u32,

    /// Looks up queue 0 of the given family on `dev`.
    fn init(vkd: DeviceDispatch, dev: vk.Device, family: u32) Queue {
        const queue_handle = vkd.getDeviceQueue(dev, family, 0);
        return Queue{
            .handle = queue_handle,
            .family = family,
        };
    }
};
/// Asks GLFW to create a Vulkan surface for `window` on `instance`.
/// Any GLFW-side failure is collapsed into error.SurfaceInitFailed.
fn createSurface(instance: vk.Instance, window: *glfw.Window) !vk.SurfaceKHR {
    var result: vk.SurfaceKHR = undefined;
    glfw.createWindowSurface(instance, window.*, null, &result) catch {
        return error.SurfaceInitFailed;
    };
    return result;
}
/// Creates the logical device for a picked candidate, requesting one graphics
/// queue and one present queue (collapsed to a single create-info when both
/// roles live in the same family) plus the required device extensions.
fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device {
    const queue_priority = [_]f32{1};
    const graphics_family = candidate.queues.graphics_family;
    const present_family = candidate.queues.present_family;

    const queue_infos = [_]vk.DeviceQueueCreateInfo{
        .{
            .flags = .{},
            .queue_family_index = graphics_family,
            .queue_count = 1,
            .p_queue_priorities = &queue_priority,
        },
        .{
            .flags = .{},
            .queue_family_index = present_family,
            .queue_count = 1,
            .p_queue_priorities = &queue_priority,
        },
    };

    // Vulkan forbids duplicate queue family indices in pCreateInfos, so only
    // pass the first entry when both roles share a family.
    const info_count: u32 = if (graphics_family == present_family) 1 else 2;

    return try vki.createDevice(candidate.pdev, &.{
        .flags = .{},
        .queue_create_info_count = info_count,
        .p_queue_create_infos = &queue_infos,
        .enabled_layer_count = 0,
        .pp_enabled_layer_names = undefined,
        .enabled_extension_count = required_device_extensions.len,
        .pp_enabled_extension_names = @ptrCast([*]const [*:0]const u8, &required_device_extensions),
        .p_enabled_features = null,
    }, null);
}
/// A physical device that passed suitability checks, with its cached
/// properties and the queue families chosen for it.
const DeviceCandidate = struct {
    pdev: vk.PhysicalDevice,
    props: vk.PhysicalDeviceProperties,
    queues: QueueAllocation,
};

/// Queue family indices for graphics and presentation (may be equal).
const QueueAllocation = struct {
    graphics_family: u32,
    present_family: u32,
};
/// Enumerates all physical devices and returns the first one that passes
/// `checkSuitable`. `allocator` is only used for temporary query storage.
fn pickPhysicalDevice(
    vki: InstanceDispatch,
    instance: vk.Instance,
    allocator: Allocator,
    surface: vk.SurfaceKHR,
) !DeviceCandidate {
    // Standard two-call enumeration: count first, then fill.
    var count: u32 = undefined;
    _ = try vki.enumeratePhysicalDevices(instance, &count, null);

    const devices = try allocator.alloc(vk.PhysicalDevice, count);
    defer allocator.free(devices);
    _ = try vki.enumeratePhysicalDevices(instance, &count, devices.ptr);

    for (devices) |device| {
        const maybe_candidate = try checkSuitable(vki, device, allocator, surface);
        if (maybe_candidate) |candidate| {
            return candidate;
        }
    }
    return error.NoSuitableDevice;
}
/// Returns a DeviceCandidate if `pdev` supports the required extensions, can
/// present to `surface`, and has usable graphics/present queue families;
/// otherwise returns null.
fn checkSuitable(
    vki: InstanceDispatch,
    pdev: vk.PhysicalDevice,
    allocator: Allocator,
    surface: vk.SurfaceKHR,
) !?DeviceCandidate {
    if (!try checkExtensionSupport(vki, pdev, allocator)) return null;
    if (!try checkSurfaceSupport(vki, pdev, surface)) return null;

    const allocation = (try allocateQueues(vki, pdev, allocator, surface)) orelse return null;
    return DeviceCandidate{
        .pdev = pdev,
        .props = vki.getPhysicalDeviceProperties(pdev),
        .queues = allocation,
    };
}
/// Scans the device's queue families for one with graphics support and one
/// that can present to `surface` (they may be the same family). Returns null
/// when either role cannot be filled.
fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation {
    var count: u32 = undefined;
    vki.getPhysicalDeviceQueueFamilyProperties(pdev, &count, null);

    const family_props = try allocator.alloc(vk.QueueFamilyProperties, count);
    defer allocator.free(family_props);
    vki.getPhysicalDeviceQueueFamilyProperties(pdev, &count, family_props.ptr);

    var graphics: ?u32 = null;
    var present: ?u32 = null;
    for (family_props) |props, idx| {
        const family = @intCast(u32, idx);
        if (graphics == null and props.queue_flags.graphics_bit) {
            graphics = family;
        }
        if (present == null) {
            // Only query surface support until a present family is found.
            if ((try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) {
                present = family;
            }
        }
    }

    const graphics_family = graphics orelse return null;
    const present_family = present orelse return null;
    return QueueAllocation{
        .graphics_family = graphics_family,
        .present_family = present_family,
    };
}
/// A surface is usable only when the device reports at least one surface
/// format and at least one present mode for it.
fn checkSurfaceSupport(vki: InstanceDispatch, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool {
    var n_formats: u32 = undefined;
    _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &n_formats, null);

    var n_present_modes: u32 = undefined;
    _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &n_present_modes, null);

    return n_formats > 0 and n_present_modes > 0;
}
/// Checks that every entry of `required_device_extensions` appears in the
/// device's extension list. Extension names from Vulkan are fixed-size,
/// NUL-terminated arrays, so each is trimmed to its NUL before comparing.
fn checkExtensionSupport(
    vki: InstanceDispatch,
    pdev: vk.PhysicalDevice,
    allocator: Allocator,
) !bool {
    var count: u32 = undefined;
    _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, null);

    const available = try allocator.alloc(vk.ExtensionProperties, count);
    defer allocator.free(available);
    _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, available.ptr);

    required: for (required_device_extensions) |required| {
        const required_name = std.mem.span(required);
        for (available) |props| {
            const name_len = std.mem.indexOfScalar(u8, &props.extension_name, 0).?;
            if (std.mem.eql(u8, required_name, props.extension_name[0..name_len])) {
                continue :required;
            }
        }
        // This required extension was not found in the available list.
        return false;
    }
    return true;
}

View File

@ -1,324 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
const GraphicsContext = @import("graphics_context.zig").GraphicsContext;
// from https://github.com/Snektron/vulkan-zig/blob/master/examples/swapchain.zig
// todo?
const Allocator = std.mem.Allocator;
/// Wraps a VkSwapchainKHR together with its per-image views, semaphores, and
/// fences. Create with `init`, tear down with `deinit`; `recreate` rebuilds
/// the chain (e.g. after a resize) while recycling the old handle.
pub const Swapchain = struct {
    pub const PresentState = enum { optimal, suboptimal };

    gc: *const GraphicsContext,
    allocator: Allocator,

    surface_format: vk.SurfaceFormatKHR,
    present_mode: vk.PresentModeKHR,
    extent: vk.Extent2D,
    handle: vk.SwapchainKHR,

    swap_images: []SwapImage,
    image_index: u32,

    pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain {
        return try reinit(gc, allocator, extent, .null_handle);
    }

    /// Builds a swapchain, optionally recycling `old_handle` (which is
    /// destroyed afterwards, as the spec allows retiring it only once the new
    /// chain exists).
    pub fn reinit(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain {
        const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface);
        const actual_extent = findActualExtent(caps, extent);
        if (actual_extent.width == 0 or actual_extent.height == 0) {
            return error.InvalidSurfaceDimensions;
        }

        const surface_format = try findSurfaceFormat(gc, allocator);
        const present_mode = try findPresentMode(gc, allocator);

        // One more image than the minimum reduces driver stalls; 0 for
        // max_image_count means "no upper limit".
        var image_count = caps.min_image_count + 1;
        if (caps.max_image_count > 0) {
            image_count = std.math.min(image_count, caps.max_image_count);
        }

        const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family };
        const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != gc.present_queue.family)
            .concurrent
        else
            .exclusive;

        const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{
            .flags = .{},
            .surface = gc.surface,
            .present_mode = present_mode,
            .min_image_count = image_count,
            .image_format = surface_format.format,
            .image_color_space = surface_format.color_space,
            .image_extent = actual_extent,
            .image_array_layers = 1,
            .image_usage = .{ .color_attachment_bit = true },
            .image_sharing_mode = sharing_mode,
            .queue_family_index_count = if (sharing_mode == .concurrent) qfi.len else 0,
            .p_queue_family_indices = &qfi,
            .pre_transform = caps.current_transform,
            .composite_alpha = .{ .opaque_bit_khr = true },
            .clipped = vk.TRUE,
            .old_swapchain = old_handle,
        }, null);
        errdefer gc.vkd.destroySwapchainKHR(gc.dev, handle, null);

        if (old_handle != .null_handle) {
            // The retired swapchain handle still needs an explicit destroy.
            gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null);
        }

        const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator);
        errdefer {
            for (swap_images) |si| si.deinit(gc);
            allocator.free(swap_images);
        }

        return Swapchain{
            .handle = handle,
            .gc = gc,
            .allocator = allocator,
            .surface_format = surface_format,
            .present_mode = present_mode,
            .extent = actual_extent,
            .swap_images = swap_images,
            .image_index = 0,
        };
    }

    /// Releases the per-image resources but not the swapchain handle itself
    /// (used by `recreate`, which hands the old handle to `reinit`).
    fn deinitExceptSwapchain(self: Swapchain) void {
        for (self.swap_images) |si| si.deinit(self.gc);
        self.allocator.free(self.swap_images);
    }

    /// Best-effort wait on all per-image fences; wait errors are ignored.
    pub fn waitForAllFences(self: Swapchain) !void {
        for (self.swap_images) |si| si.waitForFence(self.gc) catch {};
    }

    pub fn deinit(self: Swapchain) void {
        self.deinitExceptSwapchain();
        self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null);
    }

    /// Rebuilds the swapchain for `new_extent` in place.
    pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void {
        try self.gc.vkd.deviceWaitIdle(self.gc.dev);
        const gc = self.gc;
        const allocator = self.allocator;
        const old_handle = self.handle;
        self.deinitExceptSwapchain();
        self.* = try reinit(gc, allocator, new_extent, old_handle);
    }

    pub fn currentImage(self: Swapchain) vk.Image {
        return self.swap_images[self.image_index].image;
    }

    pub fn currentSwapImage(self: Swapchain) *const SwapImage {
        return &self.swap_images[self.image_index];
    }

    /// Acquires the next image, submits `cmdbuf`, and presents it.
    /// Returns whether the swapchain is still optimal for the surface.
    ///
    /// FIX(review): the previous version waited on `self.sem_next_image_acquired`,
    /// a field this struct no longer has (the semaphore swap was commented out),
    /// so it did not compile. It also reset the *previous* image's fence and
    /// then submitted with the *new* image's fence, which was never waited on
    /// or reset. Both are corrected below; leftover debug prints removed.
    pub fn present(self: *Swapchain, cmdbuf: vk.CommandBuffer) !PresentState {
        // Step 1: acquire the next image. Reuse the semaphore of the image
        // that was current until now; the submit below waits on it, keeping
        // each semaphore's signal/wait strictly paired per frame.
        const image_acquired = self.currentSwapImage().image_acquired;
        const result = try self.gc.vkd.acquireNextImageKHR(
            self.gc.dev,
            self.handle,
            std.math.maxInt(u64),
            image_acquired,
            .null_handle,
        );
        self.image_index = result.image_index;
        const current = self.currentSwapImage();

        // Step 2: the fence handed to queueSubmit must be unsignaled and not
        // in flight — wait for the acquired image's previous use, then reset.
        try current.waitForFence(self.gc);
        try self.gc.vkd.resetFences(self.gc.dev, 1, @ptrCast([*]const vk.Fence, &current.frame_fence));

        // Step 3: submit, waiting for the acquire at the color-attachment
        // stage and signalling render_finished plus the image's fence.
        const wait_stage = [_]vk.PipelineStageFlags{.{ .color_attachment_output_bit = true }};
        try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{
            .wait_semaphore_count = 1,
            .p_wait_semaphores = @ptrCast([*]const vk.Semaphore, &image_acquired),
            .p_wait_dst_stage_mask = &wait_stage,
            .command_buffer_count = 1,
            .p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &cmdbuf),
            .signal_semaphore_count = 1,
            .p_signal_semaphores = @ptrCast([*]const vk.Semaphore, &current.render_finished),
        }}, current.frame_fence);

        // Step 4: present the rendered image once rendering has finished.
        _ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, &.{
            .wait_semaphore_count = 1,
            .p_wait_semaphores = @ptrCast([*]const vk.Semaphore, &current.render_finished),
            .swapchain_count = 1,
            .p_swapchains = @ptrCast([*]const vk.SwapchainKHR, &self.handle),
            .p_image_indices = @ptrCast([*]const u32, &self.image_index),
            .p_results = null,
        });

        return switch (result.result) {
            .success => .optimal,
            .suboptimal_khr => .suboptimal,
            else => unreachable,
        };
    }
};
/// Per-swapchain-image resources: the image view plus the semaphores and
/// fence used to synchronize acquire/render/present for that image.
const SwapImage = struct {
    image: vk.Image,
    view: vk.ImageView,
    image_acquired: vk.Semaphore,
    render_finished: vk.Semaphore,
    frame_fence: vk.Fence,

    /// Creates the view and sync objects for one swapchain image. The fence
    /// starts signaled so the first wait on it returns immediately.
    pub fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage {
        const view = try gc.vkd.createImageView(gc.dev, &.{
            .flags = .{},
            .image = image,
            .view_type = .@"2d",
            .format = format,
            .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity },
            .subresource_range = .{
                .aspect_mask = .{ .color_bit = true },
                .base_mip_level = 0,
                .level_count = 1,
                .base_array_layer = 0,
                .layer_count = 1,
            },
        }, null);
        errdefer gc.vkd.destroyImageView(gc.dev, view, null);

        const image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{ .flags = .{} }, null);
        errdefer gc.vkd.destroySemaphore(gc.dev, image_acquired, null);

        const render_finished = try gc.vkd.createSemaphore(gc.dev, &.{ .flags = .{} }, null);
        errdefer gc.vkd.destroySemaphore(gc.dev, render_finished, null);

        const frame_fence = try gc.vkd.createFence(gc.dev, &.{ .flags = .{ .signaled_bit = true } }, null);
        errdefer gc.vkd.destroyFence(gc.dev, frame_fence, null);

        return SwapImage{
            .image = image,
            .view = view,
            .image_acquired = image_acquired,
            .render_finished = render_finished,
            .frame_fence = frame_fence,
        };
    }

    /// Waits for any in-flight work on this image, then destroys its
    /// view and sync objects. A failed wait aborts the cleanup.
    fn deinit(self: SwapImage, gc: *const GraphicsContext) void {
        self.waitForFence(gc) catch return;
        gc.vkd.destroyImageView(gc.dev, self.view, null);
        gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null);
        gc.vkd.destroySemaphore(gc.dev, self.render_finished, null);
        gc.vkd.destroyFence(gc.dev, self.frame_fence, null);
    }

    /// Blocks until this image's fence signals.
    /// FIX(review): removed a leftover `std.debug.print` that spammed stderr
    /// every frame.
    fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void {
        _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast([*]const vk.Fence, &self.frame_fence), vk.TRUE, std.math.maxInt(u64));
    }
};
/// Fetches the swapchain's images and wraps each in a SwapImage.
/// Caller owns the returned slice (freed via SwapImage.deinit + allocator.free).
fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage {
    var image_count: u32 = undefined;
    _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &image_count, null);

    const raw_images = try allocator.alloc(vk.Image, image_count);
    defer allocator.free(raw_images);
    _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &image_count, raw_images.ptr);

    const swap_images = try allocator.alloc(SwapImage, image_count);
    errdefer allocator.free(swap_images);

    // Track how many entries were successfully initialized so the errdefer
    // only tears down those.
    var initialized: usize = 0;
    errdefer for (swap_images[0..initialized]) |si| si.deinit(gc);

    for (raw_images) |raw_image, idx| {
        swap_images[idx] = try SwapImage.init(gc, raw_image, format);
        initialized = idx + 1;
    }
    return swap_images;
}
/// Picks B8G8R8A8_SRGB + sRGB-nonlinear when the surface offers it,
/// otherwise falls back to the first reported format.
fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR {
    var count: u32 = undefined;
    _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null);

    const available = try allocator.alloc(vk.SurfaceFormatKHR, count);
    defer allocator.free(available);
    _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, available.ptr);

    const wanted = vk.SurfaceFormatKHR{
        .format = .b8g8r8a8_srgb,
        .color_space = .srgb_nonlinear_khr,
    };
    for (available) |candidate| {
        if (std.meta.eql(candidate, wanted)) return wanted;
    }

    // There must always be at least one supported surface format.
    return available[0];
}
/// Chooses the first supported present mode from a preference list
/// (mailbox > immediate > fifo_relaxed > fifo). FIFO is the guaranteed
/// fallback if none of the preferred modes are reported.
fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR {
    var count: u32 = undefined;
    _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null);

    const available = try allocator.alloc(vk.PresentModeKHR, count);
    defer allocator.free(available);
    _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, available.ptr);

    const preference_order = [_]vk.PresentModeKHR{
        .mailbox_khr,
        .immediate_khr,
        .fifo_relaxed_khr,
        .fifo_khr,
    };
    for (preference_order) |preferred| {
        if (std.mem.indexOfScalar(vk.PresentModeKHR, available, preferred) != null) {
            return preferred;
        }
    }
    return .fifo_khr;
}
/// Resolves the swapchain extent: 0xFFFF_FFFF in current_extent means the
/// surface lets the swapchain choose, so clamp the requested extent to the
/// surface's min/max; otherwise the surface dictates the extent.
fn findActualExtent(caps: vk.SurfaceCapabilitiesKHR, extent: vk.Extent2D) vk.Extent2D {
    if (caps.current_extent.width == 0xFFFF_FFFF) {
        return .{
            .width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width),
            .height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height),
        };
    }
    return caps.current_extent;
}

View File

@ -1,27 +0,0 @@
const vk = @import("vulkan");

/// Interleaved vertex layout used by the triangle pipeline:
/// 2D position followed by an RGB color, one binding at index 0.
pub const Vertex = struct {
    // One vertex per instance step; stride covers the whole struct.
    pub const binding_description = vk.VertexInputBindingDescription{
        .binding = 0,
        .stride = @sizeOf(Vertex),
        .input_rate = .vertex,
    };

    pub const attribute_description = [_]vk.VertexInputAttributeDescription{
        // location 0: vec2 position
        .{
            .binding = 0,
            .location = 0,
            .format = .r32g32_sfloat,
            .offset = @offsetOf(Vertex, "pos"),
        },
        // location 1: vec3 color
        .{
            .binding = 0,
            .location = 1,
            .format = .r32g32b32_sfloat,
            .offset = @offsetOf(Vertex, "color"),
        },
    };

    pos: [2]f32,
    color: [3]f32,
};

View File

@ -1,487 +0,0 @@
const std = @import("std");
const glfw = @import("glfw");
const vk = @import("vulkan");
const resources = @import("resources");
const Allocator = std.mem.Allocator;
const GraphicsContext = @import("graphics_context.zig").GraphicsContext;
const Swapchain = @import("swapchain.zig").Swapchain;
const Vertex = @import("vertex.zig").Vertex;
// TODO: model loading!
// Hard-coded triangle: one red, one green, one blue corner in NDC-ish
// coordinates. Uploaded once into the vertex buffer at init.
const vertices = [_]Vertex{
    .{ .pos = .{ 0, -0.5 }, .color = .{ 1, 0, 0 } },
    .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } },
    .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } },
};
pub const VulkanRenderer = struct {
gc: *const GraphicsContext,
allocator: Allocator,
window: *glfw.Window,
swapchain: Swapchain,
render_pass: vk.RenderPass,
pipeline_layout: vk.PipelineLayout,
pipeline: vk.Pipeline,
cmdbufs: []vk.CommandBuffer,
framebuffers: []vk.Framebuffer,
pool: vk.CommandPool,
buffer: vk.Buffer,
memory: vk.DeviceMemory,
viewport: vk.Viewport,
scissor: vk.Rect2D,
window_resized: bool,
new_extent: vk.Extent2D,
/// Builds the whole render stack: swapchain, pipeline layout, render pass,
/// pipeline, framebuffers, command pool, vertex buffer + memory, and the
/// per-image command buffers.
///
/// FIX(review): `window_resized` and `new_extent` were left `undefined` but
/// are read in `drawFrame`; they are now initialized. errdefer cleanup was
/// also missing for every fallible step, leaking everything created so far
/// on a failed init.
pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, window: *glfw.Window) !VulkanRenderer {
    var self: VulkanRenderer = undefined;
    self.window = window;
    self.allocator = allocator;
    self.gc = gc;
    self.window_resized = false;
    self.new_extent = extent;

    // FIXME(review): `&self` points into this stack frame; the struct is
    // returned *by value*, so this user pointer dangles as soon as init
    // returns. The caller must re-register the pointer on the final storage
    // location (or init should fill caller-owned storage) — confirm callers.
    window.setUserPointer(&self);
    window.setFramebufferSizeCallback(resizeCallback);

    self.swapchain = try Swapchain.init(gc, allocator, extent);
    errdefer self.swapchain.deinit();

    self.pipeline_layout = try self.gc.vkd.createPipelineLayout(self.gc.dev, &.{
        .flags = .{},
        .set_layout_count = 0,
        .p_set_layouts = undefined,
        .push_constant_range_count = 0,
        .p_push_constant_ranges = undefined,
    }, null);
    errdefer gc.vkd.destroyPipelineLayout(gc.dev, self.pipeline_layout, null);

    self.render_pass = try self.createRenderPass();
    errdefer gc.vkd.destroyRenderPass(gc.dev, self.render_pass, null);

    self.pipeline = try self.createPipeline();
    errdefer gc.vkd.destroyPipeline(gc.dev, self.pipeline, null);

    self.framebuffers = try self.createFramebuffers();
    errdefer self.destroyFramebuffers();

    self.pool = try gc.vkd.createCommandPool(gc.dev, &.{
        .flags = .{},
        .queue_family_index = gc.graphics_queue.family,
    }, null);
    errdefer gc.vkd.destroyCommandPool(gc.dev, self.pool, null);

    // Device-local vertex buffer; filled via a staging copy in uploadVertices.
    self.buffer = try gc.vkd.createBuffer(gc.dev, &.{
        .flags = .{},
        .size = @sizeOf(@TypeOf(vertices)),
        .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
        .sharing_mode = .exclusive,
        .queue_family_index_count = 0,
        .p_queue_family_indices = undefined,
    }, null);
    errdefer gc.vkd.destroyBuffer(gc.dev, self.buffer, null);

    const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, self.buffer);
    self.memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true });
    errdefer gc.vkd.freeMemory(gc.dev, self.memory, null);
    try gc.vkd.bindBufferMemory(gc.dev, self.buffer, self.memory, 0);

    try self.uploadVertices();
    self.cmdbufs = try self.createCommandBuffers(extent);
    return self;
}
/// Rebuilds the swapchain for a new extent.
/// FIX(review): `recreate` returns an error union that was silently
/// discarded (a compile error in this `!void` function) — propagate it.
/// NOTE(review): unlike drawFrame's resize path, this does not recreate the
/// render pass/pipeline/framebuffers — confirm that is intentional.
pub fn reinit(self: *VulkanRenderer, extent: vk.Extent2D) !void {
    try self.swapchain.recreate(extent);
}
/// Tears down everything init created, waiting for in-flight frames first.
/// Destruction runs roughly in reverse creation order; the swapchain goes
/// last because the framebuffers/views reference its images.
pub fn deinit(self: *VulkanRenderer) void {
    // todo: log?
    // Best effort: a failed fence wait must not block shutdown.
    self.swapchain.waitForAllFences() catch {};
    self.destroyCommandBuffers();
    self.gc.vkd.freeMemory(self.gc.dev, self.memory, null);
    self.gc.vkd.destroyBuffer(self.gc.dev, self.buffer, null);
    self.gc.vkd.destroyCommandPool(self.gc.dev, self.pool, null);
    self.destroyFramebuffers();
    self.gc.vkd.destroyPipeline(self.gc.dev, self.pipeline, null);
    self.gc.vkd.destroyRenderPass(self.gc.dev, self.render_pass, null);
    self.gc.vkd.destroyPipelineLayout(self.gc.dev, self.pipeline_layout, null);
    self.swapchain.deinit();
}
/// Presents one frame and, when the swapchain is suboptimal or the window
/// was resized, rebuilds the swapchain-dependent objects.
/// NOTE(review): the command buffer is picked with the *pre-present*
/// image_index (present updates it) — verify this indexing is intended.
pub fn drawFrame(self: *VulkanRenderer) !void {
    const cmdbuf = self.cmdbufs[self.swapchain.image_index];
    // OutOfDateKHR is not fatal: treat it like "suboptimal" and rebuild below.
    const state = self.swapchain.present(cmdbuf) catch |err| switch (err) {
        error.OutOfDateKHR => Swapchain.PresentState.suboptimal,
        else => |narrow| return narrow,
    };
    if (state == .suboptimal or self.window_resized) {
        // todo: fn
        // Everything derived from the swapchain is destroyed, the swapchain
        // recreated at self.new_extent (set by the resize callback), and the
        // derived objects rebuilt in dependency order:
        // layout -> render pass -> pipeline -> framebuffers.
        self.destroyFramebuffers();
        self.gc.vkd.destroyPipeline(self.gc.dev, self.pipeline, null);
        self.gc.vkd.destroyPipelineLayout(self.gc.dev, self.pipeline_layout, null);
        self.gc.vkd.destroyRenderPass(self.gc.dev, self.render_pass, null);
        try self.swapchain.recreate(self.new_extent);
        // todo: dedupe creation logic
        self.pipeline_layout = try self.gc.vkd.createPipelineLayout(self.gc.dev, &.{
            .flags = .{},
            .set_layout_count = 0,
            .p_set_layouts = undefined,
            .push_constant_range_count = 0,
            .p_push_constant_ranges = undefined,
        }, null);
        self.render_pass = try self.createRenderPass();
        self.pipeline = try self.createPipeline();
        self.framebuffers = try self.createFramebuffers();
        self.window_resized = false;
    }
}
/// Creates a single-subpass render pass with one color attachment matching
/// the swapchain's surface format: cleared on load, stored on store, and
/// transitioned to present layout at the end.
fn createRenderPass(self: *VulkanRenderer) !vk.RenderPass {
    const attachment = vk.AttachmentDescription{
        .flags = .{},
        .format = self.swapchain.surface_format.format,
        .samples = .{ .@"1_bit" = true },
        .load_op = .clear,
        .store_op = .store,
        .stencil_load_op = .dont_care,
        .stencil_store_op = .dont_care,
        .initial_layout = .@"undefined",
        .final_layout = .present_src_khr,
    };

    const attachment_ref = vk.AttachmentReference{
        .attachment = 0,
        .layout = .color_attachment_optimal,
    };

    // One graphics subpass writing only the color attachment above.
    const subpass = vk.SubpassDescription{
        .flags = .{},
        .pipeline_bind_point = .graphics,
        .input_attachment_count = 0,
        .p_input_attachments = undefined,
        .color_attachment_count = 1,
        .p_color_attachments = @ptrCast([*]const vk.AttachmentReference, &attachment_ref),
        .p_resolve_attachments = null,
        .p_depth_stencil_attachment = null,
        .preserve_attachment_count = 0,
        .p_preserve_attachments = undefined,
    };

    return try self.gc.vkd.createRenderPass(self.gc.dev, &.{
        .flags = .{},
        .attachment_count = 1,
        .p_attachments = @ptrCast([*]const vk.AttachmentDescription, &attachment),
        .subpass_count = 1,
        .p_subpasses = @ptrCast([*]const vk.SubpassDescription, &subpass),
        .dependency_count = 0,
        .p_dependencies = undefined,
    }, null);
}
/// Builds the one graphics pipeline used by this renderer: triangle-list
/// topology, the Vertex layout from vertex.zig, no depth/stencil, no
/// blending, with viewport and scissor left dynamic (set per command
/// buffer). Uses self.pipeline_layout and self.render_pass, which must be
/// valid before calling.
fn createPipeline(self: *VulkanRenderer) !vk.Pipeline {
    // Shader modules from embedded SPIR-V; only needed during creation.
    const vert = try self.gc.vkd.createShaderModule(self.gc.dev, &.{
        .flags = .{},
        .code_size = resources.triangle_vert.len,
        .p_code = @ptrCast([*]const u32, resources.triangle_vert),
    }, null);
    defer self.gc.vkd.destroyShaderModule(self.gc.dev, vert, null);

    const frag = try self.gc.vkd.createShaderModule(self.gc.dev, &.{
        .flags = .{},
        .code_size = resources.triangle_frag.len,
        .p_code = @ptrCast([*]const u32, resources.triangle_frag),
    }, null);
    defer self.gc.vkd.destroyShaderModule(self.gc.dev, frag, null);

    // Vertex + fragment stages, both entering at "main".
    const pssci = [_]vk.PipelineShaderStageCreateInfo{
        .{
            .flags = .{},
            .stage = .{ .vertex_bit = true },
            .module = vert,
            .p_name = "main",
            .p_specialization_info = null,
        },
        .{
            .flags = .{},
            .stage = .{ .fragment_bit = true },
            .module = frag,
            .p_name = "main",
            .p_specialization_info = null,
        },
    };

    // Vertex input wired to the descriptions declared on Vertex.
    const pvisci = vk.PipelineVertexInputStateCreateInfo{
        .flags = .{},
        .vertex_binding_description_count = 1,
        .p_vertex_binding_descriptions = @ptrCast([*]const vk.VertexInputBindingDescription, &Vertex.binding_description),
        .vertex_attribute_description_count = Vertex.attribute_description.len,
        .p_vertex_attribute_descriptions = &Vertex.attribute_description,
    };

    const piasci = vk.PipelineInputAssemblyStateCreateInfo{
        .flags = .{},
        .topology = .triangle_list,
        .primitive_restart_enable = vk.FALSE,
    };

    // Viewport/scissor values are dynamic (see pdsci below), so only counts
    // matter here.
    const pvsci = vk.PipelineViewportStateCreateInfo{
        .flags = .{},
        .viewport_count = 1,
        .p_viewports = undefined, // set in createCommandBuffers iwth cmdSetViewport
        .scissor_count = 1,
        .p_scissors = undefined, //set in createCommandBuffers
    };

    // Filled polygons, back-face culling, clockwise front faces, no bias.
    const prsci = vk.PipelineRasterizationStateCreateInfo{
        .flags = .{},
        .depth_clamp_enable = vk.FALSE,
        .rasterizer_discard_enable = vk.FALSE,
        .polygon_mode = .fill,
        .cull_mode = .{ .back_bit = true },
        .front_face = .clockwise,
        .depth_bias_enable = vk.FALSE,
        .depth_bias_constant_factor = 0,
        .depth_bias_clamp = 0,
        .depth_bias_slope_factor = 0,
        .line_width = 1,
    };

    // No multisampling.
    const pmsci = vk.PipelineMultisampleStateCreateInfo{
        .flags = .{},
        .rasterization_samples = .{ .@"1_bit" = true },
        .sample_shading_enable = vk.FALSE,
        .min_sample_shading = 1,
        .p_sample_mask = null,
        .alpha_to_coverage_enable = vk.FALSE,
        .alpha_to_one_enable = vk.FALSE,
    };

    // Blending disabled; writes all RGBA channels.
    const pcbas = vk.PipelineColorBlendAttachmentState{
        .blend_enable = vk.FALSE,
        .src_color_blend_factor = .one,
        .dst_color_blend_factor = .zero,
        .color_blend_op = .add,
        .src_alpha_blend_factor = .one,
        .dst_alpha_blend_factor = .zero,
        .alpha_blend_op = .add,
        .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true },
    };

    const pcbsci = vk.PipelineColorBlendStateCreateInfo{
        .flags = .{},
        .logic_op_enable = vk.FALSE,
        .logic_op = .copy,
        .attachment_count = 1,
        .p_attachments = @ptrCast([*]const vk.PipelineColorBlendAttachmentState, &pcbas),
        .blend_constants = [_]f32{ 0, 0, 0, 0 },
    };

    // Viewport + scissor are set at record time so the pipeline survives
    // window resizes.
    const dynstate = [_]vk.DynamicState{ .viewport, .scissor };
    const pdsci = vk.PipelineDynamicStateCreateInfo{
        .flags = .{},
        .dynamic_state_count = dynstate.len,
        .p_dynamic_states = &dynstate,
    };

    const gpci = vk.GraphicsPipelineCreateInfo{
        .flags = .{},
        .stage_count = 2,
        .p_stages = &pssci,
        .p_vertex_input_state = &pvisci,
        .p_input_assembly_state = &piasci,
        .p_tessellation_state = null,
        .p_viewport_state = &pvsci,
        .p_rasterization_state = &prsci,
        .p_multisample_state = &pmsci,
        .p_depth_stencil_state = null,
        .p_color_blend_state = &pcbsci,
        .p_dynamic_state = &pdsci,
        .layout = self.pipeline_layout,
        .render_pass = self.render_pass,
        .subpass = 0,
        .base_pipeline_handle = .null_handle,
        .base_pipeline_index = -1,
    };

    var pipeline: vk.Pipeline = undefined;
    _ = try self.gc.vkd.createGraphicsPipelines(
        self.gc.dev,
        .null_handle,
        1,
        @ptrCast([*]const vk.GraphicsPipelineCreateInfo, &gpci),
        null,
        @ptrCast([*]vk.Pipeline, &pipeline),
    );
    return pipeline;
}
/// Creates one framebuffer per swapchain image view at the swapchain's
/// current extent. Caller owns the returned slice.
///
/// FIX(review): on a mid-loop failure the errdefer destroyed
/// `self.framebuffers[0..i]` — the *old* (possibly stale/freed) slice —
/// instead of the framebuffers created in this call. It now walks the local
/// `framebuffers` slice.
fn createFramebuffers(self: *VulkanRenderer) ![]vk.Framebuffer {
    const framebuffers = try self.allocator.alloc(vk.Framebuffer, self.swapchain.swap_images.len);
    errdefer self.allocator.free(framebuffers);

    var i: usize = 0;
    errdefer for (framebuffers[0..i]) |fb| self.gc.vkd.destroyFramebuffer(self.gc.dev, fb, null);
    for (framebuffers) |*fb| {
        fb.* = try self.gc.vkd.createFramebuffer(self.gc.dev, &.{
            .flags = .{},
            .render_pass = self.render_pass,
            .attachment_count = 1,
            .p_attachments = @ptrCast([*]const vk.ImageView, &self.swapchain.swap_images[i].view),
            .width = self.swapchain.extent.width,
            .height = self.swapchain.extent.height,
            .layers = 1,
        }, null);
        i += 1;
    }
    return framebuffers;
}
/// Allocates and fully records one command buffer per framebuffer: begin
/// render pass (clearing to black), bind pipeline + vertex buffer, draw the
/// triangle. Also caches the viewport/scissor for `extent` on self.
/// Requires self.framebuffers, self.pool, self.render_pass, self.pipeline,
/// and self.buffer to be valid. Caller owns the returned slice.
///
/// FIX(review): removed a dead `_ = i;` discard — the loop index *is* used
/// to select the framebuffer.
fn createCommandBuffers(self: *VulkanRenderer, extent: vk.Extent2D) ![]vk.CommandBuffer {
    const cmdbufs = try self.allocator.alloc(vk.CommandBuffer, self.framebuffers.len);
    errdefer self.allocator.free(cmdbufs);

    try self.gc.vkd.allocateCommandBuffers(self.gc.dev, &.{
        .command_pool = self.pool,
        .level = .primary,
        .command_buffer_count = @truncate(u32, cmdbufs.len),
    }, cmdbufs.ptr);
    errdefer self.gc.vkd.freeCommandBuffers(self.gc.dev, self.pool, @truncate(u32, cmdbufs.len), cmdbufs.ptr);

    const clear = vk.ClearValue{
        .color = .{ .float_32 = .{ 0, 0, 0, 1 } },
    };

    self.viewport = vk.Viewport{
        .x = 0,
        .y = 0,
        .width = @intToFloat(f32, extent.width),
        .height = @intToFloat(f32, extent.height),
        .min_depth = 0,
        .max_depth = 1,
    };

    self.scissor = vk.Rect2D{
        .offset = .{ .x = 0, .y = 0 },
        .extent = extent,
    };

    for (cmdbufs) |cmdbuf, i| {
        try self.gc.vkd.beginCommandBuffer(cmdbuf, &.{
            .flags = .{},
            .p_inheritance_info = null,
        });

        self.gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast([*]const vk.Viewport, &self.viewport));
        self.gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast([*]const vk.Rect2D, &self.scissor));

        // This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627.
        const render_area = vk.Rect2D{
            .offset = .{ .x = 0, .y = 0 },
            .extent = extent,
        };

        self.gc.vkd.cmdBeginRenderPass(cmdbuf, &.{
            .render_pass = self.render_pass,
            .framebuffer = self.framebuffers[i],
            .render_area = render_area,
            .clear_value_count = 1,
            .p_clear_values = @ptrCast([*]const vk.ClearValue, &clear),
        }, .@"inline");

        self.gc.vkd.cmdBindPipeline(cmdbuf, .graphics, self.pipeline);
        const offset = [_]vk.DeviceSize{0};
        self.gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast([*]const vk.Buffer, &self.buffer), &offset);
        self.gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0);

        self.gc.vkd.cmdEndRenderPass(cmdbuf);
        try self.gc.vkd.endCommandBuffer(cmdbuf);
    }

    return cmdbufs;
}
/// Returns the recorded command buffers to the pool, then frees the slice
/// that holds their handles.
fn destroyCommandBuffers(self: *VulkanRenderer) void {
    const count = @truncate(u32, self.cmdbufs.len);
    self.gc.vkd.freeCommandBuffers(self.gc.dev, self.pool, count, self.cmdbufs.ptr);
    self.allocator.free(self.cmdbufs);
}
/// Destroys every swapchain framebuffer, then frees the slice holding them.
fn destroyFramebuffers(self: *VulkanRenderer) void {
    var idx: usize = 0;
    while (idx < self.framebuffers.len) : (idx += 1) {
        self.gc.vkd.destroyFramebuffer(self.gc.dev, self.framebuffers[idx], null);
    }
    self.allocator.free(self.framebuffers);
}
/// Copies `size` bytes from `src` to `dst` on the GPU using a one-shot
/// command buffer, then blocks until the copy completes (queueWaitIdle).
/// Synchronous by design — intended for infrequent uploads, not per-frame use.
fn copyBuffer(self: *VulkanRenderer, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void {
var cmdbuf: vk.CommandBuffer = undefined;
try self.gc.vkd.allocateCommandBuffers(self.gc.dev, &.{
.command_pool = self.pool,
.level = .primary,
.command_buffer_count = 1,
}, @ptrCast([*]vk.CommandBuffer, &cmdbuf));
// Freed on every exit path, including errors after this point.
defer self.gc.vkd.freeCommandBuffers(self.gc.dev, self.pool, 1, @ptrCast([*]const vk.CommandBuffer, &cmdbuf));
// one_time_submit lets the driver optimize for a single-use recording.
try self.gc.vkd.beginCommandBuffer(cmdbuf, &.{
.flags = .{ .one_time_submit_bit = true },
.p_inheritance_info = null,
});
// Whole-range copy: both buffers are read/written from offset 0.
const region = vk.BufferCopy{
.src_offset = 0,
.dst_offset = 0,
.size = size,
};
self.gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast([*]const vk.BufferCopy, &region));
try self.gc.vkd.endCommandBuffer(cmdbuf);
// No semaphores needed: completion is enforced by the queueWaitIdle below,
// so the semaphore arrays may be left undefined with counts of 0.
const si = vk.SubmitInfo{
.wait_semaphore_count = 0,
.p_wait_semaphores = undefined,
.p_wait_dst_stage_mask = undefined,
.command_buffer_count = 1,
.p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &cmdbuf),
.signal_semaphore_count = 0,
.p_signal_semaphores = undefined,
};
try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, @ptrCast([*]const vk.SubmitInfo, &si), .null_handle);
// Block until the GPU finishes the copy before the command buffer is freed.
try self.gc.vkd.queueWaitIdle(self.gc.graphics_queue.handle);
}
/// Uploads the file-level `vertices` array to `self.buffer` (device-local)
/// via a host-visible staging buffer: create staging buffer, map + memcpy,
/// then a blocking GPU copy (copyBuffer). Staging resources are freed on exit.
fn uploadVertices(self: *VulkanRenderer) !void {
const staging_buffer = try self.gc.vkd.createBuffer(self.gc.dev, &.{
.flags = .{},
.size = @sizeOf(@TypeOf(vertices)),
.usage = .{ .transfer_src_bit = true },
.sharing_mode = .exclusive,
.queue_family_index_count = 0,
.p_queue_family_indices = undefined,
}, null);
defer self.gc.vkd.destroyBuffer(self.gc.dev, staging_buffer, null);
// host_visible + host_coherent: CPU-mappable, no explicit flush needed.
const mem_reqs = self.gc.vkd.getBufferMemoryRequirements(self.gc.dev, staging_buffer);
const staging_memory = try self.gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true });
defer self.gc.vkd.freeMemory(self.gc.dev, staging_memory, null);
try self.gc.vkd.bindBufferMemory(self.gc.dev, staging_buffer, staging_memory, 0);
// Scoped block so the mapping is released (via defer) before the GPU copy.
{
const data = try self.gc.vkd.mapMemory(self.gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
defer self.gc.vkd.unmapMemory(self.gc.dev, staging_memory);
const gpu_vertices = @ptrCast([*]Vertex, @alignCast(@alignOf(Vertex), data));
for (vertices) |vertex, i| {
gpu_vertices[i] = vertex;
}
}
// Synchronous copy staging -> self.buffer; assumes self.buffer was created
// with transfer_dst usage and is at least @sizeOf(@TypeOf(vertices)) bytes
// — TODO(review): confirm at the creation site (outside this view).
try self.copyBuffer(self.buffer, staging_buffer, @sizeOf(@TypeOf(vertices)));
}
};
/// GLFW resize callback: records the new dimensions and flags the renderer so
/// it can recreate size-dependent resources on the next frame. Does nothing
/// if no renderer is registered as the window's user pointer.
fn resizeCallback(window: glfw.Window, width: u32, height: u32) void {
    const renderer = window.getUserPointer(VulkanRenderer) orelse return;
    renderer.window_resized = true;
    renderer.new_extent.width = width;
    renderer.new_extent.height = height;
}