diff --git a/build.zig b/build.zig index 0bab88e..375be71 100644 --- a/build.zig +++ b/build.zig @@ -1,6 +1,80 @@ const std = @import("std"); -const pkgs = @import("deps.zig").pkgs; +const Step = std.build.Step; +const Builder = std.build.Builder; +const glfw = @import("deps.zig").pkgs.glfw; const build_pkgs = @import("deps.zig").build_pkgs; +const build_glfw = build_pkgs.build_glfw; +const vkgen = build_pkgs.build_vulkan; + +pub const ResourceGenStep = struct { + step: Step, + shader_step: *vkgen.ShaderCompileStep, + builder: *Builder, + package: std.build.Pkg, + output_file: std.build.GeneratedFile, + resources: std.ArrayList(u8), + + pub fn init(builder: *Builder, out: []const u8) *ResourceGenStep { + const self = builder.allocator.create(ResourceGenStep) catch unreachable; + const full_out_path = std.fs.path.join(builder.allocator, &[_][]const u8{ + builder.build_root, + out, + }) catch unreachable; + + self.* = .{ + .step = Step.init(.custom, "resources", builder.allocator, make), + .shader_step = vkgen.ShaderCompileStep.init(builder, &[_][]const u8{ "glslc", "--target-env=vulkan1.2" }, ""), + .builder = builder, + .package = .{ + .name = "resources", + .source = .{ .generated = &self.output_file }, + .dependencies = null, + }, + .output_file = .{ + .step = &self.step, + .path = full_out_path, + }, + .resources = std.ArrayList(u8).init(builder.allocator), + }; + + self.step.dependOn(&self.shader_step.step); + return self; + } + + fn renderPath(path: []const u8, writer: anytype) void { + const separators = &[_]u8{ std.fs.path.sep_windows, std.fs.path.sep_posix }; + var i: usize = 0; + while (std.mem.indexOfAnyPos(u8, path, i, separators)) |j| { + writer.writeAll(path[i..j]) catch unreachable; + switch (std.fs.path.sep) { + std.fs.path.sep_windows => writer.writeAll("\\\\") catch unreachable, + std.fs.path.sep_posix => writer.writeByte(std.fs.path.sep_posix) catch unreachable, + else => unreachable, + } + + i = j + 1; + } + writer.writeAll(path[i..]) catch 
unreachable; + } + + pub fn addShader(self: *ResourceGenStep, name: []const u8, source: []const u8) void { + const shader_out_path = self.shader_step.add(source); + var writer = self.resources.writer(); + + writer.print("pub const {s} = @embedFile(\"", .{name}) catch unreachable; + renderPath(shader_out_path, writer); + writer.writeAll("\");\n") catch unreachable; + } + + fn make(step: *Step) !void { + const self = @fieldParentPtr(ResourceGenStep, "step", step); + const cwd = std.fs.cwd(); + + const dir = std.fs.path.dirname(self.output_file.path.?).?; + try cwd.makePath(dir); + try cwd.writeFile(self.output_file.path.?, self.resources.items); + } +}; pub fn build(b: *std.build.Builder) void { const target = b.standardTargetOptions(.{}); @@ -10,10 +84,16 @@ pub fn build(b: *std.build.Builder) void { exe.setTarget(target); exe.setBuildMode(mode); - const gen = build_pkgs.build_vulkan.VkGenerateStep.init(b, "etc/vk.xml", "vk.zig"); - exe.addPackage(pkgs.glfw); + const gen = vkgen.VkGenerateStep.init(b, "etc/vk.xml", "vk.zig"); + exe.addPackage(glfw); exe.addPackage(gen.package); - build_pkgs.build_glfw.link(b, exe, .{}); + build_glfw.link(b, exe, .{}); + + const res = ResourceGenStep.init(b, "resources.zig"); + res.addShader("test_frag", "src/shaders/test.frag"); + res.addShader("triangle_vert", "src/shaders/triangle.vert"); + res.addShader("triangle_frag", "src/shaders/triangle.frag"); + exe.addPackage(res.package); exe.install(); diff --git a/src/main.zig b/src/main.zig index b10332b..a9c4621 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,14 +1,515 @@ const std = @import("std"); const glfw = @import("glfw"); +const vk = @import("vulkan"); +const resources = @import("resources"); +const GraphicsContext = @import("render/graphics_context.zig").GraphicsContext; +const Swapchain = @import("render/swapchain.zig").Swapchain; +// TODO: +const Allocator = std.mem.Allocator; + +const app_name = "efemra"; + +const Vertex = struct { + const binding_description = 
vk.VertexInputBindingDescription{ + .binding = 0, + .stride = @sizeOf(Vertex), + .input_rate = .vertex, + }; + + const attribute_description = [_]vk.VertexInputAttributeDescription{ + .{ + .binding = 0, + .location = 0, + .format = .r32g32_sfloat, + .offset = @offsetOf(Vertex, "pos"), + }, + .{ + .binding = 0, + .location = 1, + .format = .r32g32b32_sfloat, + .offset = @offsetOf(Vertex, "color"), + }, + }; + + pos: [2]f32, + color: [3]f32, +}; + +const vertices = [_]Vertex{ + .{ .pos = .{ 0, -0.5 }, .color = .{ 1, 0, 0 } }, + .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } }, + .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } }, +}; pub fn main() !void { try glfw.init(.{}); defer glfw.terminate(); - const window = try glfw.Window.create(1280, 720, "YOOOO", null, null, .{}); + var extent = vk.Extent2D{ .width = 1280, .height = 720 }; + + var window = try glfw.Window.create(extent.width, extent.height, app_name, null, null, .{}); defer window.destroy(); + // temp allocator + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + defer _ = gpa.deinit(); + const allocator = gpa.allocator(); + + const gc = try GraphicsContext.init(allocator, app_name, &window); + defer gc.deinit(); + + std.debug.print("Using device: {s}\n", .{gc.deviceName()}); + + var swapchain = try Swapchain.init(&gc, allocator, extent); + defer swapchain.deinit(); + + const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ + .flags = .{}, + .set_layout_count = 0, + .p_set_layouts = undefined, + .push_constant_range_count = 0, + .p_push_constant_ranges = undefined, + }, null); + defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); + + const render_pass = try createRenderPass(&gc, swapchain); + defer gc.vkd.destroyRenderPass(gc.dev, render_pass, null); + + const pipeline = try createPipeline(&gc, pipeline_layout, render_pass); + defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); + + var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); + defer 
destroyFramebuffers(&gc, allocator, framebuffers); + + const pool = try gc.vkd.createCommandPool(gc.dev, &.{ + .flags = .{}, + .queue_family_index = gc.graphics_queue.family, + }, null); + defer gc.vkd.destroyCommandPool(gc.dev, pool, null); + + const buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .flags = .{}, + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, + .sharing_mode = .exclusive, + .queue_family_index_count = 0, + .p_queue_family_indices = undefined, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, buffer, null); + const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer); + const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true }); + defer gc.vkd.freeMemory(gc.dev, memory, null); + try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0); + + try uploadVertices(&gc, pool, buffer); + + var cmdbufs = try createCommandBuffers( + &gc, + pool, + allocator, + buffer, + swapchain.extent, + render_pass, + pipeline, + framebuffers, + ); + defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); + while (!window.shouldClose()) { + const cmdbuf = cmdbufs[swapchain.image_index]; + + const state = swapchain.present(cmdbuf) catch |err| switch (err) { + error.OutOfDateKHR => Swapchain.PresentState.suboptimal, + else => |narrow| return narrow, + }; + + const size = try window.getSize(); + + if (state == .suboptimal or extent.width != size.width or extent.height != size.height) { + extent.width = size.width; + extent.height = size.height; + try swapchain.recreate(extent); + + destroyFramebuffers(&gc, allocator, framebuffers); + framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); + + destroyCommandBuffers(&gc, pool, allocator, cmdbufs); + cmdbufs = try createCommandBuffers( + &gc, + pool, + allocator, + buffer, + swapchain.extent, + render_pass, + pipeline, + framebuffers, + ); + } + try glfw.pollEvents(); } + + try swapchain.waitForAllFences(); +} + +fn 
uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer) !void { + const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .flags = .{}, + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .transfer_src_bit = true }, + .sharing_mode = .exclusive, + .queue_family_index_count = 0, + .p_queue_family_indices = undefined, + }, null); + + defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null); + const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer); + const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); + defer gc.vkd.freeMemory(gc.dev, staging_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0); + + { + const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); + defer gc.vkd.unmapMemory(gc.dev, staging_memory); + + const gpu_vertices = @ptrCast([*]Vertex, @alignCast(@alignOf(Vertex), data)); + for (vertices) |vertex, i| { + gpu_vertices[i] = vertex; + } + } + + try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices))); +} + +fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { + var cmdbuf: vk.CommandBuffer = undefined; + try gc.vkd.allocateCommandBuffers(gc.dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = 1, + }, @ptrCast([*]vk.CommandBuffer, &cmdbuf)); + defer gc.vkd.freeCommandBuffers(gc.dev, pool, 1, @ptrCast([*]const vk.CommandBuffer, &cmdbuf)); + + try gc.vkd.beginCommandBuffer(cmdbuf, &.{ + .flags = .{ .one_time_submit_bit = true }, + .p_inheritance_info = null, + }); + + const region = vk.BufferCopy{ + .src_offset = 0, + .dst_offset = 0, + .size = size, + }; + gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast([*]const vk.BufferCopy, &region)); + + try gc.vkd.endCommandBuffer(cmdbuf); + + const si = vk.SubmitInfo{ + .wait_semaphore_count = 0, + .p_wait_semaphores = undefined, + 
.p_wait_dst_stage_mask = undefined, + .command_buffer_count = 1, + .p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &cmdbuf), + .signal_semaphore_count = 0, + .p_signal_semaphores = undefined, + }; + try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast([*]const vk.SubmitInfo, &si), .null_handle); + try gc.vkd.queueWaitIdle(gc.graphics_queue.handle); +} + +fn createCommandBuffers( + gc: *const GraphicsContext, + pool: vk.CommandPool, + allocator: Allocator, + buffer: vk.Buffer, + extent: vk.Extent2D, + render_pass: vk.RenderPass, + pipeline: vk.Pipeline, + framebuffers: []vk.Framebuffer, +) ![]vk.CommandBuffer { + const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len); + errdefer allocator.free(cmdbufs); + + _ = pipeline; + _ = render_pass; + _ = extent; + _ = buffer; + _ = pool; + _ = gc; + + try gc.vkd.allocateCommandBuffers(gc.dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = @truncate(u32, cmdbufs.len), + }, cmdbufs.ptr); + errdefer gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(u32, cmdbufs.len), cmdbufs.ptr); + + const clear = vk.ClearValue{ + .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, + }; + + const viewport = vk.Viewport{ + .x = 0, + .y = 0, + .width = @intToFloat(f32, extent.width), + .height = @intToFloat(f32, extent.height), + .min_depth = 0, + .max_depth = 1, + }; + + const scissor = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = extent, + }; + + for (cmdbufs) |cmdbuf, i| { + _ = i; + try gc.vkd.beginCommandBuffer(cmdbuf, &.{ + .flags = .{}, + .p_inheritance_info = null, + }); + + gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast([*]const vk.Viewport, &viewport)); + gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast([*]const vk.Rect2D, &scissor)); + + // This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627. 
+ const render_area = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = extent, + }; + + gc.vkd.cmdBeginRenderPass(cmdbuf, &.{ + .render_pass = render_pass, + .framebuffer = framebuffers[i], + .render_area = render_area, + .clear_value_count = 1, + .p_clear_values = @ptrCast([*]const vk.ClearValue, &clear), + }, .@"inline"); + + gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); + const offset = [_]vk.DeviceSize{0}; + gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast([*]const vk.Buffer, &buffer), &offset); + gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); + + gc.vkd.cmdEndRenderPass(cmdbuf); + try gc.vkd.endCommandBuffer(cmdbuf); + } + + return cmdbufs; +} + +fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { + gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(u32, cmdbufs.len), cmdbufs.ptr); + allocator.free(cmdbufs); +} + +fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_pass: vk.RenderPass, swapchain: Swapchain) ![]vk.Framebuffer { + const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len); + errdefer allocator.free(framebuffers); + + var i: usize = 0; + errdefer for (framebuffers[0..i]) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); + + for (framebuffers) |*fb| { + fb.* = try gc.vkd.createFramebuffer(gc.dev, &.{ + .flags = .{}, + .render_pass = render_pass, + .attachment_count = 1, + .p_attachments = @ptrCast([*]const vk.ImageView, &swapchain.swap_images[i].view), + .width = swapchain.extent.width, + .height = swapchain.extent.height, + .layers = 1, + }, null); + i += 1; + } + + return framebuffers; +} + +fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void { + for (framebuffers) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); + allocator.free(framebuffers); +} + +fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.RenderPass { 
+ const color_attachment = vk.AttachmentDescription{ + .flags = .{}, + .format = swapchain.surface_format.format, + .samples = .{ .@"1_bit" = true }, + .load_op = .clear, + .store_op = .store, + .stencil_load_op = .dont_care, + .stencil_store_op = .dont_care, + .initial_layout = .@"undefined", + .final_layout = .present_src_khr, + }; + + const color_attachment_ref = vk.AttachmentReference{ + .attachment = 0, + .layout = .color_attachment_optimal, + }; + + const subpass = vk.SubpassDescription{ + .flags = .{}, + .pipeline_bind_point = .graphics, + .input_attachment_count = 0, + .p_input_attachments = undefined, + .color_attachment_count = 1, + .p_color_attachments = @ptrCast([*]const vk.AttachmentReference, &color_attachment_ref), + .p_resolve_attachments = null, + .p_depth_stencil_attachment = null, + .preserve_attachment_count = 0, + .p_preserve_attachments = undefined, + }; + + return try gc.vkd.createRenderPass(gc.dev, &.{ + .flags = .{}, + .attachment_count = 1, + .p_attachments = @ptrCast([*]const vk.AttachmentDescription, &color_attachment), + .subpass_count = 1, + .p_subpasses = @ptrCast([*]const vk.SubpassDescription, &subpass), + .dependency_count = 0, + .p_dependencies = undefined, + }, null); +} + +fn createPipeline( + gc: *const GraphicsContext, + layout: vk.PipelineLayout, + render_pass: vk.RenderPass, +) !vk.Pipeline { + const vert = try gc.vkd.createShaderModule(gc.dev, &.{ + .flags = .{}, + .code_size = resources.triangle_vert.len, + .p_code = @ptrCast([*]const u32, resources.triangle_vert), + }, null); + defer gc.vkd.destroyShaderModule(gc.dev, vert, null); + + const frag = try gc.vkd.createShaderModule(gc.dev, &.{ + .flags = .{}, + .code_size = resources.triangle_frag.len, + .p_code = @ptrCast([*]const u32, resources.triangle_frag), + }, null); + defer gc.vkd.destroyShaderModule(gc.dev, frag, null); + + const pssci = [_]vk.PipelineShaderStageCreateInfo{ + .{ + .flags = .{}, + .stage = .{ .vertex_bit = true }, + .module = vert, + .p_name = "main", 
+ .p_specialization_info = null, + }, + .{ + .flags = .{}, + .stage = .{ .fragment_bit = true }, + .module = frag, + .p_name = "main", + .p_specialization_info = null, + }, + }; + + const pvisci = vk.PipelineVertexInputStateCreateInfo{ + .flags = .{}, + .vertex_binding_description_count = 1, + .p_vertex_binding_descriptions = @ptrCast([*]const vk.VertexInputBindingDescription, &Vertex.binding_description), + .vertex_attribute_description_count = Vertex.attribute_description.len, + .p_vertex_attribute_descriptions = &Vertex.attribute_description, + }; + + const piasci = vk.PipelineInputAssemblyStateCreateInfo{ + .flags = .{}, + .topology = .triangle_list, + .primitive_restart_enable = vk.FALSE, + }; + + const pvsci = vk.PipelineViewportStateCreateInfo{ + .flags = .{}, + .viewport_count = 1, + .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport + .scissor_count = 1, + .p_scissors = undefined, // set in createCommandBuffers + }; + + const prsci = vk.PipelineRasterizationStateCreateInfo{ + .flags = .{}, + .depth_clamp_enable = vk.FALSE, + .rasterizer_discard_enable = vk.FALSE, + .polygon_mode = .fill, + .cull_mode = .{ .back_bit = true }, + .front_face = .clockwise, + .depth_bias_enable = vk.FALSE, + .depth_bias_constant_factor = 0, + .depth_bias_clamp = 0, + .depth_bias_slope_factor = 0, + .line_width = 1, + }; + + const pmsci = vk.PipelineMultisampleStateCreateInfo{ + .flags = .{}, + .rasterization_samples = .{ .@"1_bit" = true }, + .sample_shading_enable = vk.FALSE, + .min_sample_shading = 1, + .p_sample_mask = null, + .alpha_to_coverage_enable = vk.FALSE, + .alpha_to_one_enable = vk.FALSE, + }; + + const pcbas = vk.PipelineColorBlendAttachmentState{ + .blend_enable = vk.FALSE, + .src_color_blend_factor = .one, + .dst_color_blend_factor = .zero, + .color_blend_op = .add, + .src_alpha_blend_factor = .one, + .dst_alpha_blend_factor = .zero, + .alpha_blend_op = .add, + .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = 
true }, + }; + + const pcbsci = vk.PipelineColorBlendStateCreateInfo{ + .flags = .{}, + .logic_op_enable = vk.FALSE, + .logic_op = .copy, + .attachment_count = 1, + .p_attachments = @ptrCast([*]const vk.PipelineColorBlendAttachmentState, &pcbas), + .blend_constants = [_]f32{ 0, 0, 0, 0 }, + }; + + const dynstate = [_]vk.DynamicState{ .viewport, .scissor }; + const pdsci = vk.PipelineDynamicStateCreateInfo{ + .flags = .{}, + .dynamic_state_count = dynstate.len, + .p_dynamic_states = &dynstate, + }; + + const gpci = vk.GraphicsPipelineCreateInfo{ + .flags = .{}, + .stage_count = 2, + .p_stages = &pssci, + .p_vertex_input_state = &pvisci, + .p_input_assembly_state = &piasci, + .p_tessellation_state = null, + .p_viewport_state = &pvsci, + .p_rasterization_state = &prsci, + .p_multisample_state = &pmsci, + .p_depth_stencil_state = null, + .p_color_blend_state = &pcbsci, + .p_dynamic_state = &pdsci, + .layout = layout, + .render_pass = render_pass, + .subpass = 0, + .base_pipeline_handle = .null_handle, + .base_pipeline_index = -1, + }; + + var pipeline: vk.Pipeline = undefined; + _ = try gc.vkd.createGraphicsPipelines( + gc.dev, + .null_handle, + 1, + @ptrCast([*]const vk.GraphicsPipelineCreateInfo, &gpci), + null, + @ptrCast([*]vk.Pipeline, &pipeline), + ); + return pipeline; } diff --git a/src/render/graphics_context.zig b/src/render/graphics_context.zig new file mode 100644 index 0000000..9c31c82 --- /dev/null +++ b/src/render/graphics_context.zig @@ -0,0 +1,357 @@ +const std = @import("std"); +const vk = @import("vulkan"); +const glfw = @import("glfw"); +const Allocator = std.mem.Allocator; + +const required_device_extensions = [_][*:0]const u8{vk.extension_info.khr_swapchain.name}; + +const BaseDispatch = vk.BaseWrapper(.{ + .createInstance = true, +}); + +const InstanceDispatch = vk.InstanceWrapper(.{ + .destroyInstance = true, + .createDevice = true, + .destroySurfaceKHR = true, + .enumeratePhysicalDevices = true, + .getPhysicalDeviceProperties = true, + 
.enumerateDeviceExtensionProperties = true, + .getPhysicalDeviceSurfaceFormatsKHR = true, + .getPhysicalDeviceSurfacePresentModesKHR = true, + .getPhysicalDeviceSurfaceCapabilitiesKHR = true, + .getPhysicalDeviceQueueFamilyProperties = true, + .getPhysicalDeviceSurfaceSupportKHR = true, + .getPhysicalDeviceMemoryProperties = true, + .getDeviceProcAddr = true, +}); + +const DeviceDispatch = vk.DeviceWrapper(.{ + .destroyDevice = true, + .getDeviceQueue = true, + .createSemaphore = true, + .createFence = true, + .createImageView = true, + .destroyImageView = true, + .destroySemaphore = true, + .destroyFence = true, + .getSwapchainImagesKHR = true, + .createSwapchainKHR = true, + .destroySwapchainKHR = true, + .acquireNextImageKHR = true, + .deviceWaitIdle = true, + .waitForFences = true, + .resetFences = true, + .queueSubmit = true, + .queuePresentKHR = true, + .createCommandPool = true, + .destroyCommandPool = true, + .allocateCommandBuffers = true, + .freeCommandBuffers = true, + .queueWaitIdle = true, + .createShaderModule = true, + .destroyShaderModule = true, + .createPipelineLayout = true, + .destroyPipelineLayout = true, + .createRenderPass = true, + .destroyRenderPass = true, + .createGraphicsPipelines = true, + .destroyPipeline = true, + .createFramebuffer = true, + .destroyFramebuffer = true, + .beginCommandBuffer = true, + .endCommandBuffer = true, + .allocateMemory = true, + .freeMemory = true, + .createBuffer = true, + .destroyBuffer = true, + .getBufferMemoryRequirements = true, + .mapMemory = true, + .unmapMemory = true, + .bindBufferMemory = true, + .cmdBeginRenderPass = true, + .cmdEndRenderPass = true, + .cmdBindPipeline = true, + .cmdDraw = true, + .cmdSetViewport = true, + .cmdSetScissor = true, + .cmdBindVertexBuffers = true, + .cmdCopyBuffer = true, +}); + +pub const GraphicsContext = struct { + vkb: BaseDispatch, + vki: InstanceDispatch, + vkd: DeviceDispatch, + + instance: vk.Instance, + surface: vk.SurfaceKHR, + pdev: vk.PhysicalDevice, + 
props: vk.PhysicalDeviceProperties, + mem_props: vk.PhysicalDeviceMemoryProperties, + + dev: vk.Device, + graphics_queue: Queue, + present_queue: Queue, + + pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *glfw.Window) !GraphicsContext { + var self: GraphicsContext = undefined; + self.vkb = try BaseDispatch.load(getInstanceProcWrapper); + + const glfw_exts = try glfw.getRequiredInstanceExtensions(); + + const app_info = vk.ApplicationInfo{ + .p_application_name = app_name, + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = app_name, + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_2, + }; + + self.instance = try self.vkb.createInstance(&.{ + .flags = .{}, + .p_application_info = &app_info, + .enabled_layer_count = 0, + .pp_enabled_layer_names = undefined, + .enabled_extension_count = @intCast(u32, glfw_exts.len), + .pp_enabled_extension_names = @ptrCast([*]const [*:0]const u8, glfw_exts), + }, null); + + self.vki = try InstanceDispatch.load(self.instance, glfw.getInstanceProcAddress); + errdefer self.vki.destroyInstance(self.instance, null); + + self.surface = try createSurface(self.instance, window); + errdefer self.vki.destroySurfaceKHR(self.instance, self.surface, null); + + const candidate = try pickPhysicalDevice(self.vki, self.instance, allocator, self.surface); + self.pdev = candidate.pdev; + self.props = candidate.props; + self.dev = try initializeCandidate(self.vki, candidate); + self.vkd = try DeviceDispatch.load(self.dev, self.vki.dispatch.vkGetDeviceProcAddr); + errdefer self.vkd.destroyDevice(self.dev, null); + + self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family); + self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.present_family); + + self.mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev); + + return self; + } + + pub fn deinit(self: GraphicsContext) void { + self.vkd.destroyDevice(self.dev, null); + 
self.vki.destroySurfaceKHR(self.instance, self.surface, null); + self.vki.destroyInstance(self.instance, null); + } + + pub fn deviceName(self: GraphicsContext) []const u8 { + const len = std.mem.indexOfScalar(u8, &self.props.device_name, 0).?; + return self.props.device_name[0..len]; + } + + pub fn findMemoryTypeIndex(self: GraphicsContext, memory_type_bits: u32, flags: vk.MemoryPropertyFlags) !u32 { + // wow wtf is this stuff + for (self.mem_props.memory_types[0..self.mem_props.memory_type_count]) |mem_type, i| { + if (memory_type_bits & (@as(u32, 1) << @truncate(u5, i)) != 0 and mem_type.property_flags.contains(flags)) { + return @truncate(u32, i); + } + } + + return error.NoSuitableMemoryType; + } + + pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory { + return try self.vkd.allocateMemory(self.dev, &.{ + .allocation_size = requirements.size, + .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), + }, null); + } + + fn getInstanceProcWrapper(inst: vk.Instance, name: [*:0]const u8) ?glfw.VKProc { + return glfw.getInstanceProcAddress(&inst, name); + } +}; + +pub const Queue = struct { + handle: vk.Queue, + family: u32, + + fn init(vkd: DeviceDispatch, dev: vk.Device, family: u32) Queue { + return .{ + .handle = vkd.getDeviceQueue(dev, family, 0), + .family = family, + }; + } +}; + +fn createSurface(instance: vk.Instance, window: *glfw.Window) !vk.SurfaceKHR { + var surface: vk.SurfaceKHR = undefined; + if (glfw.createWindowSurface(instance, window.*, null, &surface)) { + return surface; + } else |_| return error.SurfaceInitFailed; +} + +fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device { + const priority = [_]f32{1}; + const qci = [_]vk.DeviceQueueCreateInfo{ + .{ + .flags = .{}, + .queue_family_index = candidate.queues.graphics_family, + .queue_count = 1, + .p_queue_priorities = &priority, + }, + .{ + .flags = .{}, + 
.queue_family_index = candidate.queues.present_family, + .queue_count = 1, + .p_queue_priorities = &priority, + }, + }; + + const queue_count: u32 = if (candidate.queues.graphics_family == candidate.queues.present_family) + 1 + else + 2; + + return try vki.createDevice(candidate.pdev, &.{ + .flags = .{}, + .queue_create_info_count = queue_count, + .p_queue_create_infos = &qci, + .enabled_layer_count = 0, + .pp_enabled_layer_names = undefined, + .enabled_extension_count = required_device_extensions.len, + .pp_enabled_extension_names = @ptrCast([*]const [*:0]const u8, &required_device_extensions), + .p_enabled_features = null, + }, null); +} + +const DeviceCandidate = struct { + pdev: vk.PhysicalDevice, + props: vk.PhysicalDeviceProperties, + queues: QueueAllocation, +}; + +const QueueAllocation = struct { + graphics_family: u32, + present_family: u32, +}; + +fn pickPhysicalDevice( + vki: InstanceDispatch, + instance: vk.Instance, + allocator: Allocator, + surface: vk.SurfaceKHR, +) !DeviceCandidate { + var device_count: u32 = undefined; + _ = try vki.enumeratePhysicalDevices(instance, &device_count, null); + + const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count); + defer allocator.free(pdevs); + + _ = try vki.enumeratePhysicalDevices(instance, &device_count, pdevs.ptr); + + for (pdevs) |pdev| { + if (try checkSuitable(vki, pdev, allocator, surface)) |candidate| { + return candidate; + } + } + + return error.NoSuitableDevice; +} + +fn checkSuitable( + vki: InstanceDispatch, + pdev: vk.PhysicalDevice, + allocator: Allocator, + surface: vk.SurfaceKHR, +) !?DeviceCandidate { + const props = vki.getPhysicalDeviceProperties(pdev); + + if (!try checkExtensionSupport(vki, pdev, allocator)) { + return null; + } + + if (!try checkSurfaceSupport(vki, pdev, surface)) { + return null; + } + + if (try allocateQueues(vki, pdev, allocator, surface)) |allocation| { + return DeviceCandidate{ + .pdev = pdev, + .props = props, + .queues = allocation, + }; + } + + return 
null; +} + +fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation { + var family_count: u32 = undefined; + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + + const families = try allocator.alloc(vk.QueueFamilyProperties, family_count); + defer allocator.free(families); + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + var graphics_family: ?u32 = null; + var present_family: ?u32 = null; + + for (families) |properties, i| { + const family = @intCast(u32, i); + + if (graphics_family == null and properties.queue_flags.graphics_bit) { + graphics_family = family; + } + + if (present_family == null and (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) { + present_family = family; + } + } + + if (graphics_family != null and present_family != null) { + return QueueAllocation{ + .graphics_family = graphics_family.?, + .present_family = present_family.?, + }; + } + + return null; +} + +fn checkSurfaceSupport(vki: InstanceDispatch, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool { + var format_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); + + var present_mode_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); + + return format_count > 0 and present_mode_count > 0; +} + +fn checkExtensionSupport( + vki: InstanceDispatch, + pdev: vk.PhysicalDevice, + allocator: Allocator, +) !bool { + var count: u32 = undefined; + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, null); + + const propsv = try allocator.alloc(vk.ExtensionProperties, count); + defer allocator.free(propsv); + + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, propsv.ptr); + + for (required_device_extensions) |ext| { + for (propsv) |props| { + const len = std.mem.indexOfScalar(u8, 
&props.extension_name, 0).?; + const prop_ext_name = props.extension_name[0..len]; + if (std.mem.eql(u8, std.mem.span(ext), prop_ext_name)) { + break; + } + } else { + return false; + } + } + + return true; +} diff --git a/src/render/swapchain.zig b/src/render/swapchain.zig new file mode 100644 index 0000000..8d17600 --- /dev/null +++ b/src/render/swapchain.zig @@ -0,0 +1,327 @@ +const std = @import("std"); +const vk = @import("vulkan"); +const GraphicsContext = @import("graphics_context.zig").GraphicsContext; + +// from https://github.com/Snektron/vulkan-zig/blob/master/examples/swapchain.zig + +// todo? +const Allocator = std.mem.Allocator; + +pub const Swapchain = struct { + pub const PresentState = enum { optimal, suboptimal }; + + gc: *const GraphicsContext, + allocator: Allocator, + + surface_format: vk.SurfaceFormatKHR, + present_mode: vk.PresentModeKHR, + extent: vk.Extent2D, + handle: vk.SwapchainKHR, + + swap_images: []SwapImage, + image_index: u32, + next_image_acquired: vk.Semaphore, + + pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain { + return try initRecycle(gc, allocator, extent, .null_handle); + } + + pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { + const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); + const actual_extent = findActualExtent(caps, extent); + if (actual_extent.width == 0 or actual_extent.height == 0) { + return error.InvalidSurfaceDimensions; + } + + const surface_format = try findSurfaceFormat(gc, allocator); + const present_mode = try findPresentMode(gc, allocator); + + var image_count = caps.min_image_count + 1; + if (caps.max_image_count > 0) { + image_count = std.math.min(image_count, caps.max_image_count); + } + + const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family }; + const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != 
gc.present_queue.family)
            .concurrent
        else
            .exclusive;

        // Create the (possibly recycled) swapchain; `old_swapchain` lets the
        // driver reuse resources from the previous swapchain on recreation.
        const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{
            .flags = .{},
            .surface = gc.surface,
            .min_image_count = image_count,
            .image_format = surface_format.format,
            .image_color_space = surface_format.color_space,
            .image_extent = actual_extent,
            .image_array_layers = 1,
            .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true },
            .image_sharing_mode = sharing_mode,
            .queue_family_index_count = qfi.len,
            .p_queue_family_indices = &qfi,
            .pre_transform = caps.current_transform,
            .composite_alpha = .{ .opaque_bit_khr = true },
            .present_mode = present_mode,
            .clipped = vk.TRUE,
            .old_swapchain = old_handle,
        }, null);
        errdefer gc.vkd.destroySwapchainKHR(gc.dev, handle, null);

        if (old_handle != .null_handle) {
            // Apparently, the old swapchain handle still needs to be destroyed after recreating.
            gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null);
        }

        const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator);
        errdefer {
            for (swap_images) |si| si.deinit(gc);
            allocator.free(swap_images);
        }

        // Acquire the first image up front so the returned Swapchain already
        // has a valid current image; see the comment in `present` for why.
        var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{ .flags = .{} }, null);
        errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null);

        const result = try gc.vkd.acquireNextImageKHR(gc.dev, handle, std.math.maxInt(u64), next_image_acquired, .null_handle);
        if (result.result != .success) {
            return error.ImageAcquireFailed;
        }

        // Hand the signalled semaphore to the acquired image's slot; its old
        // semaphore becomes the new auxiliary `next_image_acquired`.
        std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired);
        return Swapchain{
            .gc = gc,
            .allocator = allocator,
            .surface_format = surface_format,
            .present_mode = present_mode,
            .extent = actual_extent,
            .handle = handle,
            .swap_images = swap_images,
            .image_index = result.image_index,
            .next_image_acquired = next_image_acquired,
        };
    }

    /// Destroy every per-image resource and the auxiliary acquire semaphore,
    /// but leave the swapchain handle itself alive (used by `recreate`).
    fn deinitExceptSwapchain(self: Swapchain) void {
        for (self.swap_images) |si| si.deinit(self.gc);
        self.allocator.free(self.swap_images);
        self.gc.vkd.destroySemaphore(self.gc.dev, self.next_image_acquired, null);
    }

    /// Best-effort drain: wait for every in-flight frame fence.
    /// Individual wait failures are deliberately swallowed.
    pub fn waitForAllFences(self: Swapchain) !void {
        for (self.swap_images) |si| si.waitForFence(self.gc) catch {};
    }

    /// Full teardown: per-image resources plus the swapchain handle.
    pub fn deinit(self: Swapchain) void {
        self.deinitExceptSwapchain();
        self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null);
    }

    /// Recreate the swapchain for `new_extent`, recycling the old handle.
    pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void {
        const gc = self.gc;
        const allocator = self.allocator;
        const old_handle = self.handle;
        self.deinitExceptSwapchain();
        self.* = try initRecycle(gc, allocator, new_extent, old_handle);
    }

    /// Image currently owned by the application (last acquired).
    pub fn currentImage(self: Swapchain) vk.Image {
        return self.swap_images[self.image_index].image;
    }

    /// Per-image bundle (view + sync objects) for the current image.
    pub fn currentSwapImage(self: Swapchain) *const SwapImage {
        return &self.swap_images[self.image_index];
    }

    // TODO: switch to whatever pim does?
    pub fn present(self: *Swapchain, cmdbuf: vk.CommandBuffer) !PresentState {
        // Simple method:
        // 1) Acquire next image
        // 2) Wait for and reset fence of the acquired image
        // 3) Submit command buffer with fence of acquired image,
        //    dependent on the semaphore signalled by the first step.
        // 4) Present current frame, dependent on semaphore signalled by previous step
        // Problem: This way we can't reference the current image while rendering.
        // Better method: Shuffle the steps around such that acquire next image is the last step,
        // leaving the swapchain in a state with the current image.
        // 1) Wait for and reset fence of current image
        // 2) Submit command buffer, signalling fence of current image and dependent on
        //    the semaphore signalled by step 4.
        // 3) Present current frame, dependent on semaphore signalled by the submit
        // 4) Acquire next image, signalling its semaphore
        // One problem that arises is that we can't know beforehand which semaphore to signal,
        // so we keep an extra auxiliary semaphore that is swapped around

        // Step 1: Make sure the current frame has finished rendering
        const current = self.currentSwapImage();
        try current.waitForFence(self.gc);
        try self.gc.vkd.resetFences(self.gc.dev, 1, @ptrCast([*]const vk.Fence, &current.frame_fence));

        // Step 2: Submit the command buffer
        const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }};
        try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{
            .wait_semaphore_count = 1,
            .p_wait_semaphores = @ptrCast([*]const vk.Semaphore, &current.image_acquired),
            .p_wait_dst_stage_mask = &wait_stage,
            .command_buffer_count = 1,
            .p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &cmdbuf),
            .signal_semaphore_count = 1,
            .p_signal_semaphores = @ptrCast([*]const vk.Semaphore, &current.render_finished),
        }}, current.frame_fence);

        // Step 3: Present the current frame
        _ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, &.{
            .wait_semaphore_count = 1,
            .p_wait_semaphores = @ptrCast([*]const vk.Semaphore, &current.render_finished),
            .swapchain_count = 1,
            .p_swapchains = @ptrCast([*]const vk.SwapchainKHR, &self.handle),
            .p_image_indices = @ptrCast([*]const u32, &self.image_index),
            .p_results = null,
        });

        // Step 4: Acquire next frame
        const result = try self.gc.vkd.acquireNextImageKHR(
            self.gc.dev,
            self.handle,
            std.math.maxInt(u64),
            self.next_image_acquired,
            .null_handle,
        );

        // Rotate the auxiliary semaphore into the newly acquired image's slot.
        std.mem.swap(vk.Semaphore, &self.swap_images[result.image_index].image_acquired, &self.next_image_acquired);
        self.image_index = result.image_index;

        return switch (result.result) {
            .success => .optimal,
            .suboptimal_khr => .suboptimal,
            // Any other code would have come back as a Zig error from the
            // wrapper, so it cannot reach this switch.
            else => unreachable,
        };
    }
};

const
SwapImage = struct {
    image: vk.Image,
    view: vk.ImageView,
    // Signalled when the presentation engine hands this image to the app.
    image_acquired: vk.Semaphore,
    // Signalled when rendering into this image has completed.
    render_finished: vk.Semaphore,
    // Signalled when the frame that last used this image finished on the GPU.
    frame_fence: vk.Fence,

    /// Create the image view and per-image sync objects.
    /// The fence starts signalled so the very first wait returns immediately.
    pub fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage {
        const view = try gc.vkd.createImageView(gc.dev, &.{
            .flags = .{},
            .image = image,
            .view_type = .@"2d",
            .format = format,
            .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity },
            .subresource_range = .{
                .aspect_mask = .{ .color_bit = true },
                .base_mip_level = 0,
                .level_count = 1,
                .base_array_layer = 0,
                .layer_count = 1,
            },
        }, null);
        errdefer gc.vkd.destroyImageView(gc.dev, view, null);

        const image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{ .flags = .{} }, null);
        errdefer gc.vkd.destroySemaphore(gc.dev, image_acquired, null);

        const render_finished = try gc.vkd.createSemaphore(gc.dev, &.{ .flags = .{} }, null);
        errdefer gc.vkd.destroySemaphore(gc.dev, render_finished, null);

        const frame_fence = try gc.vkd.createFence(gc.dev, &.{ .flags = .{ .signaled_bit = true } }, null);
        errdefer gc.vkd.destroyFence(gc.dev, frame_fence, null);

        return SwapImage{
            .image = image,
            .view = view,
            .image_acquired = image_acquired,
            .render_finished = render_finished,
            .frame_fence = frame_fence,
        };
    }

    /// Wait for the image to be idle, then destroy its view and sync objects.
    /// NOTE(review): if the fence wait fails we return early and leak the
    /// handles — presumably acceptable during teardown; confirm.
    fn deinit(self: SwapImage, gc: *const GraphicsContext) void {
        self.waitForFence(gc) catch return;
        gc.vkd.destroyImageView(gc.dev, self.view, null);
        gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null);
        gc.vkd.destroySemaphore(gc.dev, self.render_finished, null);
        gc.vkd.destroyFence(gc.dev, self.frame_fence, null);
    }

    /// Block (with an effectively infinite timeout) on this image's fence.
    fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void {
        _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast([*]const vk.Fence, &self.frame_fence), vk.TRUE, std.math.maxInt(u64));
    }
};

/// Query the swapchain's images and wrap each one in a SwapImage.
/// Caller owns the returned slice (and its SwapImages) on success.
fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage {
    var count: u32 = undefined;
    // Standard two-call pattern: first fetch the count, then the handles.
    _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null);
    const images = try allocator.alloc(vk.Image, count);
    defer allocator.free(images);
    _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, images.ptr);

    const swap_images = try allocator.alloc(SwapImage, count);
    errdefer allocator.free(swap_images);

    // `i` counts fully constructed SwapImages so the errdefer only deinits
    // those on a partial failure.
    var i: usize = 0;
    errdefer for (swap_images[0..i]) |si| si.deinit(gc);

    for (images) |image| {
        swap_images[i] = try SwapImage.init(gc, image, format);
        i += 1;
    }

    return swap_images;
}

/// Pick b8g8r8a8_srgb + sRGB nonlinear when the surface supports it,
/// otherwise fall back to the first reported surface format.
fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR {
    const preferred = vk.SurfaceFormatKHR{
        .format = .b8g8r8a8_srgb,
        .color_space = .srgb_nonlinear_khr,
    };

    var count: u32 = undefined;
    _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null);
    const surface_formats = try allocator.alloc(vk.SurfaceFormatKHR, count);
    defer allocator.free(surface_formats);
    _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, surface_formats.ptr);

    for (surface_formats) |sfmt| {
        if (std.meta.eql(sfmt, preferred)) {
            return preferred;
        }
    }

    return surface_formats[0]; // There must always be at least one supported surface format.
}

/// Choose the best available present mode: mailbox (low-latency
/// triple-buffering) first, then immediate (no vsync); fifo is the
/// spec-guaranteed fallback.
fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR {
    var count: u32 = undefined;
    // Two-call pattern: fetch the count, then the modes themselves.
    _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null);
    const present_modes = try allocator.alloc(vk.PresentModeKHR, count);
    defer allocator.free(present_modes);
    _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, present_modes.ptr);

    const preferred = [_]vk.PresentModeKHR{
        .mailbox_khr,
        .immediate_khr,
    };

    for (preferred) |mode| {
        if (std.mem.indexOfScalar(vk.PresentModeKHR, present_modes, mode) != null) {
            return mode;
        }
    }

    return .fifo_khr;
}

/// Resolve the swapchain extent: use the surface's current extent unless it
/// reports the 0xFFFFFFFF "window manager decides" sentinel, in which case
/// clamp the requested extent to the surface's supported range.
fn findActualExtent(caps: vk.SurfaceCapabilitiesKHR, extent: vk.Extent2D) vk.Extent2D {
    if (caps.current_extent.width != 0xFFFF_FFFF) {
        return caps.current_extent;
    } else {
        return .{
            .width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width),
            .height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height),
        };
    }
}
diff --git a/src/shaders/test.frag b/src/shaders/test.frag
new file mode 100644
index 0000000..6e10ff1
--- /dev/null
+++ b/src/shaders/test.frag
@@ -0,0 +1,36 @@
// from https://web.engr.oregonstate.edu/~mjb/vulkan/Handouts/ShadersAndSpirv.1pp.pdf
// NOTE(review): despite the .frag name, this shader declares vertex
// attributes (aVertex, aNormal, ...) and writes varyings — it looks copied
// from a vertex-shader handout; confirm the intended pipeline stage.

#version 400
#extension GL_ARB_separate_shader_objects : enable
#extension GL_ARB_shading_language_420pack : enable

// Per-draw transform matrices (descriptor set 0, binding 0).
layout( std140, set = 0, binding = 0 ) uniform matBuf
{
    mat4 uModelMatrix;
    mat4 uViewMatrix;
    mat4 uProjectionMatrix;
    mat3 uNormalMatrix;
} Matrices;

// non-opaque must be in a uniform block:
layout( std140, set = 1, binding = 0 ) uniform lightBuf
{
    vec4 uLightPos;
} Light;

layout( location = 0 ) in vec3 aVertex;
layout( location = 1 ) in vec3 aNormal;
layout( location = 2 ) in vec3 aColor;
layout( location = 3 ) in vec2 aTexCoord;
layout ( location = 0 ) out vec3 vNormal;
layout ( location = 1 ) out vec3 vColor;
layout ( location = 2 ) out vec2 vTexCoord;

void main()
{
    // Full model-view-projection matrix.
    // NOTE(review): PVM is computed but unused while the gl_Position write
    // below stays commented out.
    mat4 PVM = Matrices.uProjectionMatrix * Matrices.uViewMatrix * Matrices.uModelMatrix;
    //gl_Position = PVM * vec4( aVertex, 1. );
    vNormal = Matrices.uNormalMatrix * aNormal;
    vColor = aColor;
    vTexCoord = aTexCoord;
}
diff --git a/src/shaders/triangle.frag b/src/shaders/triangle.frag
new file mode 100644
index 0000000..8c952fe
--- /dev/null
+++ b/src/shaders/triangle.frag
@@ -0,0 +1,9 @@
#version 450

// Colour interpolated from the vertex stage.
layout(location = 0) in vec3 v_color;

layout(location = 0) out vec4 f_color;

// Pass the interpolated colour straight through, fully opaque.
void main() {
    f_color = vec4(v_color, 1.0);
}
diff --git a/src/shaders/triangle.vert b/src/shaders/triangle.vert
new file mode 100644
index 0000000..2b8dfa5
--- /dev/null
+++ b/src/shaders/triangle.vert
@@ -0,0 +1,11 @@
#version 450

// Positions are supplied directly in clip space (z = 0, w = 1).
layout(location = 0) in vec2 a_pos;
layout(location = 1) in vec3 a_color;

layout(location = 0) out vec3 v_color;

void main() {
    gl_Position = vec4(a_pos, 0.0, 1.0);
    v_color = a_color;
}