forked from vv/efemra

tons more implementation, and now it does not crash, but stalls

This commit is contained in:
Vivianne 2022-07-17 01:28:01 -07:00
parent bcaa15c27f
commit 54854b1c48
12 changed files with 1071 additions and 172 deletions

View File

@ -69,6 +69,8 @@ pub const ProfileMark = struct {
const parent = ctx.current orelse &ctx.root;
const next = allocator.create(Node) catch unreachable;
next.* = .{};
next.mark = self;
next.parent = parent;
next.depth = ctx.depth;

View File

@ -0,0 +1,75 @@
const std = @import("std");
const assert = std.debug.assert;
const vk = @import("vulkan");
const vma = @import("vma");
const ProfileMark = @import("profiler.zig").ProfileMark;
const Command = @import("Command.zig");
const SubmitId = @import("submit_id.zig").SubmitId;
const memory = @import("memory.zig");
const QueueId = @import("queues.zig").QueueId;
const Self = @This();
state: State,
handle: vk.Buffer,
allocation: vma.Allocation,
size: i32,
pub fn init(size: i32, usage: vk.BufferUsageFlags, mem_usage: vma.MemUsage) !Self {
return try memory.bufferNew(&Self{}, size, usage, mem_usage);
}
pub fn release(self: *Self) !void {
if (self.handle != .null_handle) {
memory.Releasable.init(&.{
// .frame = get frame count
.submit_id = self.getSubmit(),
.object = .{
.buffer = .{
.handle = self.handle,
.allocation = self.allocation,
},
},
});
}
self.* = std.mem.zeroes(Self);
}
const pm_reserve = ProfileMark.init("Buffer.reserve");
pub fn reserve(self: *Self, size: i32, buffer_usage: vk.BufferUsageFlags, mem_usage: vma.MemUsage) !void {
pm_reserve.begin();
defer pm_reserve.end();
const old_size = self.size;
assert(size >= 0);
assert(old_size >= 0);
if (old_size < size) {
try self.release();
const new_size = if (size > old_size * 2) size else old_size * 2;
self.* = try init(new_size, buffer_usage, mem_usage);
}
}
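// editor's note: the growth policy above is geometric; e.g. a 64 KiB buffer
// asked for 100 KiB reallocates to 128 KiB (2x old), while a 200 KiB request
// (more than double) jumps straight to 200 KiB, keeping regrowth amortized O(1).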
pub fn getSubmit(self: *const Self) SubmitId {
if (self.handle != .null_handle) {
const id = if (self.state.stage.toInt() != 0)
SubmitId{
.counter = self.state.cmd_id,
.queue_id = self.state.owner,
.valid = true,
}
else
Command.getHeadSubmit(self.state.owner);
assert(id.valid);
return id;
}
return SubmitId{};
}
pub const State = struct {
owner: QueueId,
cmd_id: u32,
stage: vk.PipelineStageFlags,
access: vk.AccessFlags,
};
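// editor's usage sketch (assumes this file is imported as `Buffer`; sizes and
// the vma mem-usage tag are hypothetical):
// var vbuf = try Buffer.init(64 * 1024, .{ .vertex_buffer_bit = true }, .gpu_only);
// try vbuf.reserve(200 * 1024, .{ .vertex_buffer_bit = true }, .gpu_only); // regrows
// try vbuf.release(); // queues handle + allocation for deferred destruction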

View File

@ -1,18 +1,22 @@
const std = @import("std");
const assert = std.debug.assert;
const vk = @import("vulkan");
const settings = @import("settings.zig");
const Swapchain = @import("swapchain.zig").Swapchain;
const Image = @import("image.zig").Image;
const Context = @import("Context.zig");
const Device = @import("device.zig").Device;
const DeviceDispatch = @import("device.zig").DeviceDispatch;
const ProfileMark = @import("../../common/profiler.zig").ProfileMark;
const Renderer = @import("Renderer.zig");
const SubmitId = @import("swapchain.zig").SubmitId;
const SubmitId = @import("submit_id.zig").SubmitId;
const Fence = @import("sync.zig").Fence;
const Semaphore = @import("sync.zig").Semaphore;
const queues = @import("queues.zig");
const write_access: vk.AccessFlags = .{
pub const write_access: vk.AccessFlags = .{
.shader_write_bit = true,
.color_attachment_write_bit = true,
.depth_stencil_attachment_write_bit = true,
@ -24,7 +28,7 @@ const write_access: vk.AccessFlags = .{
.acceleration_structure_write_bit_khr = true,
.command_preprocess_write_bit_nv = true,
};
const read_access: vk.AccessFlags = .{
pub const read_access: vk.AccessFlags = .{
.indirect_command_read_bit = true,
.index_read_bit = true,
.vertex_attribute_read_bit = true,
@ -32,7 +36,6 @@ const read_access: vk.AccessFlags = .{
.input_attachment_read_bit = true,
.shader_read_bit = true,
.color_attachment_read_bit = true,
.shader_read_bit = true,
.depth_stencil_attachment_read_bit = true,
.transfer_read_bit = true,
.host_read_bit = true,
@ -46,8 +49,9 @@ const read_access: vk.AccessFlags = .{
.command_preprocess_read_bit_nv = true,
};
pub const CommandInitError = Pool.InitError || Fence.InitError || DeviceDispatch.AllocateCommandBuffersError;
const pm_init = ProfileMark.init("Command.init");
pub fn init(device: *const Device, queue: *queues.Queue, id: queues.QueueId) !void {
pub fn init(device: *const Device, queue: *queues.Queue, id: queues.QueueId) CommandInitError!void {
pm_init.begin();
defer pm_init.end();
@ -68,7 +72,7 @@ pub fn init(device: *const Device, queue: *queues.Queue, id: queues.QueueId) !vo
}
}
const pm_deinit = ProfileMark("Command.deinit");
const pm_deinit = ProfileMark.init("Command.deinit");
pub fn deinit(device: *const Device, queue: *const queues.Queue) void {
pm_deinit.begin();
defer pm_deinit.end();
@ -80,33 +84,33 @@ pub fn deinit(device: *const Device, queue: *const queues.Queue) void {
queue.cmd_pool.deinit(device);
}
pub fn get(queue_id: queues.QueueId, device: *const Device) !*Buffer {
var ctx = Context.context;
var cmd = &ctx.cur_cmd_buf[@enumToInt(queue_id)];
if (cmd.handle == .null_handle) {
try cmd.init(device, queues.get(queue_id));
}
pub fn getHeadSubmit(queue_id: queues.QueueId) SubmitId {
const ctx = &Context.context;
assert(@enumToInt(queue_id) < ctx.cur_cmd_buf.len);
const cur = &ctx.cur_cmd_buf[@enumToInt(queue_id)];
const prev = &ctx.prev_cmd_buf[@enumToInt(queue_id)];
if (!cmd.began) {
try device.dispatch.beginCommandBuffer(cmd.handle, &.{
.s_type = .command_buffer_begin_info,
.flags = .{ .one_time_submit_bit = true },
.p_inheritance_info = null,
});
cmd.began = true;
ctx.most_recent_begin = queue_id;
const cmd = if (cur.handle != .null_handle) cur else prev;
var id = SubmitId{};
if (cur.handle != .null_handle and cur.began) {
id.counter = cmd.id;
id.queue_id = cmd.queue_id;
id.valid = cmd.handle != .null_handle;
}
return cmd;
return id;
}
pub fn getHeadSubmit(id: queues.QueueId) SubmitId {
// TODO
_ = id;
}
const pm_flush = ProfileMark.init("Command.flush");
pub fn flush(device: *Device) !void {
pm_flush.begin();
defer pm_flush.end();
pub fn flush() void {
// TODO
var ctx = &Context.context;
for (ctx.cur_cmd_buf) |*buf| {
if (buf.began) {
_ = try buf.submit(device, null, .{}, null);
}
}
}
pub const Buffer = struct {
@ -115,7 +119,7 @@ pub const Buffer = struct {
handle: vk.CommandBuffer = .null_handle,
fence: Fence,
id: u32,
queue_id: u4,
queue_id: queues.QueueId,
// indicates which types of cmds are legal in this cmdbuf
gfx: bool,
comp: bool,
@ -125,20 +129,46 @@ pub const Buffer = struct {
began: bool,
ended: bool,
submitted: bool,
is_render_pass: bool,
in_render_pass: bool,
subpass: u8,
queue_transfer_src: bool,
queue_transfer_dst: bool,
const GetError = Buffer.InitError || DeviceDispatch.BeginCommandBufferError;
pub fn get(queue_id: queues.QueueId, device: *const Device) GetError!*Buffer {
var ctx = &Context.context;
var cmd = &ctx.cur_cmd_buf[@enumToInt(queue_id)];
if (cmd.handle == .null_handle) {
try cmd.init(device, queues.get(queue_id));
}
if (!cmd.began) {
try device.dispatch.beginCommandBuffer(cmd.handle, &.{
.s_type = .command_buffer_begin_info,
.flags = .{ .one_time_submit_bit = true },
.p_inheritance_info = null,
});
cmd.began = true;
ctx.most_recent_begin = queue_id;
}
return cmd;
}
pub const InitError = Fence.WaitError || Fence.ResetError;
const pm_buffer_init = ProfileMark.init("Command.Buffer.init");
// XXX: I do not really understand this?
pub fn init(self: *Self, device: *const Device, queue: *queues.Queue) !void {
pub fn init(self: *Self, device: *const Device, queue: *queues.Queue) InitError!void {
pm_buffer_init.begin();
defer pm_buffer_init.end();
const ring_mask: u32 = queue.cmds.len - 1;
// NOTE: the original C SASSERT((NELEM(queue->cmds) & (NELEM(queue->cmds) - 1u)) == 0)
// asserts that the ring length is a power of two, so masking with ring_mask
// below is equivalent to modulo the ring length.
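// a Zig sketch of the equivalent check (editor's addition, commented out):
// comptime assert((queue.cmds.len & (queue.cmds.len - 1)) == 0);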
const head_id = queue.head;
queue.head += 1;
while (head_id - queue.tail >= queue.cmds.len) : (queue.tail += 1) {
while (head_id - queue.tail >= queue.cmds.len) : (queue.tail +%= 1) {
const tail_slot = queue.tail & ring_mask;
assert(queue.cmd_ids[tail_slot] == queue.tail);
const tail_fence = queue.cmd_fences[tail_slot];
@ -165,12 +195,174 @@ pub const Buffer = struct {
self.pres = queue.pres;
}
pub fn submit(self: *const Self, wait_sema: Semaphore, wait_mask: vk.PipelineStageFlags, signal_sema: Semaphore) !SubmitId {
_ = self;
_ = wait_sema;
_ = wait_mask;
_ = signal_sema;
return undefined;
const pm_buffer_reset = ProfileMark.init("Command.Buffer.reset");
pub fn reset(self: *Self, device: *const Device) !void {
pm_buffer_reset.begin();
defer pm_buffer_reset.end();
assert(!self.submitted);
if (self.began and !self.ended) {
try device.dispatch.endCommandBuffer(self.handle);
self.ended = true;
}
try device.dispatch.resetCommandBuffer(self.handle, .{});
self.began = false;
self.ended = false;
}
const pm_buffer_beginrenderpass = ProfileMark.init("Command.Buffer.beginRenderPass");
pub fn beginRenderPass(self: *Self, device: *const Device, pass: vk.RenderPass, framebuf: vk.Framebuffer, rect: vk.Rect2D, clear_values: []const vk.ClearValue) void {
pm_buffer_beginrenderpass.begin();
defer pm_buffer_beginrenderpass.end();
assert(self.handle != .null_handle);
assert(!self.in_render_pass);
device.dispatch.cmdBeginRenderPass(self.handle, &.{
.s_type = .render_pass_begin_info,
.render_pass = pass,
.framebuffer = framebuf,
.render_area = rect,
.clear_value_count = @intCast(u32, clear_values.len),
.p_clear_values = clear_values.ptr,
}, .@"inline");
self.in_render_pass = true;
self.subpass = 0;
}
pub fn nextSubpass(self: *Self, device: *const Device, contents: vk.SubpassContents) void {
assert(self.handle != .null_handle);
assert(self.in_render_pass);
device.dispatch.cmdNextSubpass(self.handle, contents);
self.subpass += 1;
}
pub fn endRenderPass(self: *Self, device: *const Device) void {
assert(self.handle != .null_handle);
assert(self.in_render_pass);
device.dispatch.cmdEndRenderPass(self.handle);
self.in_render_pass = false;
self.subpass = 0;
}
pub fn viewport(self: *Self, device: *const Device, vp: vk.Viewport, scissor: vk.Rect2D) void {
assert(self.handle != .null_handle);
device.dispatch.cmdSetViewport(self.handle, 0, 1, @ptrCast([*]const vk.Viewport, &vp));
device.dispatch.cmdSetScissor(self.handle, 0, 1, @ptrCast([*]const vk.Rect2D, &scissor));
}
pub fn defaultViewport(self: *Self, device: *const Device) void {
self.viewport(device, Swapchain.getViewport(), Swapchain.getRect());
}
pub fn draw(self: *Self, device: *const Device, vertex_count: i32, first_vertex: i32) void {
assert(self.handle != .null_handle);
assert(vertex_count > 0);
assert(first_vertex >= 0);
assert(self.in_render_pass);
device.dispatch.cmdDraw(self.handle, @intCast(u32, vertex_count), 1, @intCast(u32, first_vertex), 0);
}
// NOTE: `buf` is the GPU buffer resource (the file with State above), not a command buffer
pub fn touchBuffer(self: *Self, buf: *Buffer) void {
assert(self.handle != .null_handle);
assert(buf.handle != .null_handle);
buf.state.cmd_id = self.id;
}
pub fn touchImage(self: *Self, img: *Image) void {
assert(self.handle != .null_handle);
assert(img.handle != .null_handle);
img.state.cmd_id = self.id;
}
const SubmitError = FlushQueueTransfersError || DeviceDispatch.EndCommandBufferError || DeviceDispatch.QueueSubmitError || DeviceDispatch.GetFenceStatusError;
const pm_buffer_submit = ProfileMark.init("Command.Buffer.submit");
pub fn submit(self: *Self, device: *const Device, wait_sema: ?Semaphore, wait_mask: vk.PipelineStageFlags, signal_sema: ?Semaphore) SubmitError!SubmitId {
pm_buffer_submit.begin();
defer pm_buffer_submit.end();
assert(self.began);
assert(!self.ended);
assert(!self.submitted);
assert(!self.in_render_pass);
const queue_id = self.queue_id;
const queue = queues.get(queue_id);
var submit_id = SubmitId{
.counter = self.id,
.queue_id = queue_id,
};
var ctx = &Context.context;
const slot = self.id & (queue.cmds.len - 1);
const fence = queue.cmd_fences[slot];
if (self.queue_transfer_dst) {
assert(!self.queue_transfer_src);
try flushQueueTransfers(device);
}
assert(!self.queue_transfer_dst);
assert(!self.ended);
try device.dispatch.endCommandBuffer(self.handle);
self.ended = true;
assert((try fence.stat(device)) == .unsignaled);
const submit_info = vk.SubmitInfo{
.s_type = .submit_info,
.wait_semaphore_count = if (wait_sema != null) @as(u32, 1) else 0,
.p_wait_semaphores = if (wait_sema) |s| @ptrCast([*]const vk.Semaphore, &s.handle) else undefined,
.p_wait_dst_stage_mask = @ptrCast([*]const vk.PipelineStageFlags, &wait_mask),
.signal_semaphore_count = if (signal_sema != null) @as(u32, 1) else 0,
.p_signal_semaphores = if (signal_sema) |s| @ptrCast([*]const vk.Semaphore, &s.handle) else undefined,
.command_buffer_count = 1,
.p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &self.handle),
};
try device.dispatch.queueSubmit(queue.handle, 1, @ptrCast([*]const vk.SubmitInfo, &submit_info), fence.handle);
self.submitted = true;
ctx.last_submit_queue = self.queue_id;
// copy to prevCmd for debug purposes
var prev_cmd = &ctx.prev_cmd_buf[@enumToInt(queue_id)];
prev_cmd.* = self.*;
// reset to blank slate
self.* = std.mem.zeroes(Self);
self.queue_id = queue_id;
submit_id.valid = true;
return submit_id;
}
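// editor's note: zeroing self after queueSubmit (restoring only queue_id)
// makes accidental double-submission impossible; prev_cmd_buf keeps the
// submitted state only so it can be inspected in a debugger.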
const FlushQueueTransfersError = GetError || DeviceDispatch.EndCommandBufferError || DeviceDispatch.QueueSubmitError || DeviceDispatch.GetFenceStatusError;
const pm_buffer_flushqueuetransfers = ProfileMark.init("Command.Buffer.flushqueuetransfers");
pub fn flushQueueTransfers(device: *const Device) FlushQueueTransfersError!void {
pm_buffer_flushqueuetransfers.begin();
defer pm_buffer_flushqueuetransfers.end();
var ctx = &Context.context;
var submits = std.mem.zeroes([ctx.cur_cmd_buf.len]bool);
for (ctx.cur_cmd_buf) |_, i| {
var buf = &ctx.cur_cmd_buf[i];
assert(!buf.in_render_pass);
if (buf.queue_transfer_src) {
assert(!buf.queue_transfer_dst);
_ = try buf.submit(device, null, .{}, null);
assert(!buf.queue_transfer_src);
submits[i] = true;
}
}
for (ctx.cur_cmd_buf) |*cmd, i| {
cmd.queue_transfer_dst = false;
if (submits[i]) {
_ = try get(@intToEnum(queues.QueueId, i), device); // reinitialize cmdbuf
}
}
}
};
@ -179,7 +371,8 @@ pub const Pool = struct {
handle: vk.CommandPool,
pub fn init(device: *const Device, family: u32, flags: vk.CommandPoolCreateFlags) !Self {
pub const InitError = DeviceDispatch.CreateCommandPoolError;
pub fn init(device: *const Device, family: u32, flags: vk.CommandPoolCreateFlags) InitError!Self {
const handle = try device.dispatch.createCommandPool(device.handle, &.{
.s_type = .command_pool_create_info,
.flags = flags,

View File

@ -1,3 +1,4 @@
const std = @import("std");
const vk = @import("vulkan");
const CommandBuffer = @import("Command.zig").Buffer;
const QueueId = @import("queues.zig").QueueId;
@ -12,10 +13,10 @@ const Self = @This();
pub var context: Self = Self{};
pub fn init() Self {
context = .{};
context = std.mem.zeroes(@This());
return context;
}
pub fn deinit() void {
context = .{};
context = std.mem.zeroes(@This());
}

View File

@ -76,7 +76,7 @@ pub fn update(self: *Self) !void {
// base system update
{
_ = self.swapchain.acquireSync(&self.device);
_ = try self.swapchain.acquireSync(&self.device);
_ = try self.swapchain.acquireImage(&self.device);
memory.update();
}
@ -90,7 +90,7 @@ pub fn update(self: *Self) !void {
{
//self.main_pass.setup();
// TODO textable update
Command.flush();
try Command.flush(&self.device);
// TODO bindings update
}
@ -99,12 +99,12 @@ pub fn update(self: *Self) !void {
// present phase
// currently only graphics queue
try self.swapchain.submit(try Command.get(.graphics, &self.device), &self.device);
try self.swapchain.submit(try Command.Buffer.get(.graphics, &self.device), &self.device);
// background work
{
// TODO upload lightmaps, imsys clear
Command.flush();
try Command.flush(&self.device);
}
}

View File

@ -19,7 +19,7 @@ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
// TODO: filter by settings!
const DeviceDispatch = vk.DeviceWrapper(.{
pub const DeviceDispatch = vk.DeviceWrapper(.{
.destroyDevice = true,
.getDeviceQueue = true,
.createSemaphore = true,
@ -71,6 +71,7 @@ const DeviceDispatch = vk.DeviceWrapper(.{
.cmdSetScissor = true,
.cmdBindVertexBuffers = true,
.cmdCopyBuffer = true,
.cmdPipelineBarrier = true,
.getFenceStatus = true,
});
@ -181,6 +182,7 @@ pub const Device = struct {
for (feats_list) |*feat, i| {
feat.* = .{
.phdev = .{
.s_type = .physical_device_features_2,
.features = .{},
},
};
@ -532,7 +534,7 @@ pub const Device = struct {
self.handle = try instance.dispatch.createDevice(self.physical_device, &.{
.flags = .{},
.p_next = &self.props.phdev,
.p_next = &self.feats.phdev,
.queue_create_info_count = @intCast(u32, families.count()),
.p_queue_create_infos = &queue_infos,
.enabled_layer_count = @intCast(u32, enabled_layers.len),

View File

@ -1,20 +1,26 @@
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const vk = @import("vulkan");
const vma = @import("vma");
const Device = @import("device.zig").Device;
const Command = @import("Command.zig");
const SubmitId = @import("swapchain.zig").SubmitId;
const std = @import("std");
const SubmitId = @import("submit_id.zig").SubmitId;
const queues = @import("queues.zig");
const settings = @import("settings.zig");
const memory = @import("memory.zig");
const Swapchain = @import("swapchain.zig").Swapchain;
// TODO memory
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
pub const Image = struct {
const Self = @This();
state: ImageState,
handle: vk.Image,
state: State = .{},
handle: vk.Image = .null_handle,
allocation: vma.Allocation,
view: vk.ImageView,
format: vk.Format,
@ -47,6 +53,7 @@ pub const Image = struct {
self.handle = handle;
self.image_type = info.image_type;
self.format = info.format;
self.state = State{};
self.state.layout = info.initial_layout;
self.usage = info.usage;
self.width = @intCast(u16, info.extent.width);
@ -64,79 +71,645 @@ pub const Image = struct {
}
fn getSubmit(self: *Self) SubmitId {
std.debug.assert(!self.state.substates);
const id = if (self.state.stage != 0) {
.{
.counter = self.state.cmd_id,
.queue_id = self.state.owner,
.valid = true,
assert(self.state.substates == null);
if (self.handle != .null_handle) {
const id = if (self.state.stage.toInt() != 0)
SubmitId{
.counter = self.state.cmd_id,
.queue_id = self.state.owner,
.valid = true,
}
else
Command.getHeadSubmit(self.state.owner);
} else {
Command.getHeadSubmit(self.state.owner);
};
std.debug.assert(id.valid);
return id;
}
};
const ImageState = struct {
const Self = @This();
owner: queues.QueueId,
cmd_id: i32,
stage: vk.PipelineStageFlags,
access: vk.AccessFlags,
layout: vk.ImageLayout,
substates: [*]SubImageState,
pub fn presentSrc(self: *Self, command: *const Command.Buffer) !void {
// TODO
_ = self;
_ = command;
}
};
const ImageSet = struct {
const Self = @This();
frames: [settings.resource_sets]Image,
pub fn init(info: *const vk.ImageCreateInfo, mem_usage: vma.MemUsage) !Self {
const self = Self{};
errdefer self.deinit();
for (self.frames) |frame| {
try frame.init(info, mem_usage);
assert(id.valid);
return id;
}
return SubmitId{};
}
pub fn deinit(self: *Self) void {
for (self.frames) |frame| {
frame.deinit();
pub const State = struct {
owner: queues.QueueId = .graphics,
cmd_id: u32 = 0,
stage: vk.PipelineStageFlags = .{},
access: vk.AccessFlags = .{},
layout: vk.ImageLayout = .@"undefined",
substates: ?[]SubState = null,
pub fn transferSrc(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.xfer);
const state = State{
.owner = buf.queue_id,
.stage = .{ .transfer_bit = true },
.access = .{ .transfer_read_bit = true },
.layout = .transfer_src_optimal,
};
return try state.handle(device, buf, img);
}
}
pub fn reserve(self: *Self, info: *const vk.ImageCreateInfo, mem_usage: memory.Usage) !void {
try self.current().reserve(info, mem_usage);
}
pub fn transferDst(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.xfer);
const state = State{
.owner = buf.queue_id,
.stage = .{ .transfer_bit = true },
.access = .{ .transfer_write_bit = true },
.layout = .transfer_dst_optimal,
};
return try state.handle(device, buf, img);
}
pub fn current(self: *Self, swapchain: *Swapchain) *Image {
std.debug.assert(swapchain.sync_index < self.frames.len);
return &self.frames[swapchain.sync_index];
}
pub fn fragSample(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.gfx);
const state = State{
.owner = buf.queue_id,
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .shader_read_only_optimal,
};
return try state.handle(device, buf, img);
}
pub fn prev(self: *Self, swapchain: *Swapchain) *Image {
const prev_index = (swapchain.sync_index + (settings.resource_sets - 1)) % settings.resource_sets;
std.debug.assert(prev_index < self.frames.len);
return &self.frames[prev_index];
}
};
pub fn computeSample(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.comp);
const state = State{
.owner = buf.queue_id,
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .shader_read_only_optimal,
};
return try state.handle(device, buf, img);
}
const SubImageState = packed struct {
stage: vk.PipelineStageFlags,
access: vk.AccessFlags,
layout: vk.ImageLayout,
pub fn fragLoad(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.gfx);
const state = State{
.owner = buf.queue_id,
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .shader_read_only_optimal,
};
return try state.handle(device, buf, img);
}
pub fn fragStore(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.gfx);
const state = State{
.owner = buf.queue_id,
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_write_bit = true },
.layout = .general,
};
return try state.handle(device, buf, img);
}
pub fn fragLoadStore(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.gfx);
const state = State{
.owner = buf.queue_id,
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_read_bit = true, .shader_write_bit = true },
.layout = .general,
};
return try state.handle(device, buf, img);
}
pub fn computeLoad(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.comp);
const state = State{
.owner = buf.queue_id,
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .shader_read_only_optimal,
};
return try state.handle(device, buf, img);
}
pub fn computeStore(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.comp);
const state = State{
.owner = buf.queue_id,
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_write_bit = true },
.layout = .general,
};
return try state.handle(device, buf, img);
}
pub fn computeLoadStore(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.comp);
const state = State{
.owner = buf.queue_id,
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_read_bit = true, .shader_write_bit = true },
.layout = .general,
};
return try state.handle(device, buf, img);
}
pub fn colorAttachWrite(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.gfx);
const state = State{
.owner = buf.queue_id,
.stage = .{ .color_attachment_output_bit = true },
.access = .{ .color_attachment_write_bit = true },
.layout = .color_attachment_optimal,
};
return try state.handle(device, buf, img);
}
pub fn depthAttachWrite(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.gfx);
const state = State{
.owner = buf.queue_id,
.stage = .{ .late_fragment_tests_bit = true },
.access = .{ .depth_stencil_attachment_write_bit = true },
.layout = .depth_stencil_attachment_optimal,
};
return try state.handle(device, buf, img);
}
pub fn presentSrc(device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(buf.pres);
const state = State{
.owner = buf.queue_id,
.stage = .{ .all_graphics_bit = true },
.access = .{ .color_attachment_read_bit = true, .color_attachment_write_bit = true },
.layout = .present_src_khr,
};
return try state.handle(device, buf, img);
}
fn handle(self: *const State, device: *const Device, buf: *Command.Buffer, img: *Image) !bool {
assert(img.handle != .null_handle);
assert(self.access.toInt() != 0);
assert(self.stage.toInt() != 0);
var inserted_barrier = false;
const prev = &img.state;
const prev_queue = queues.get(prev.owner);
const next_queue = queues.get(self.owner);
assert(self.stage.intersect(next_queue.stage_mask).toInt() == self.stage.toInt());
assert(self.access.intersect(next_queue.access_mask).toInt() == self.access.toInt());
if (prev.substates != null) {
assert(prev.owner == self.owner);
inserted_barrier = try SubState.handle(device, buf, img, &.{
.stage = self.stage,
.access = self.access,
.layout = self.layout,
}, 0, img.array_layers, 0, img.mip_levels);
assert(img.state.substates == null);
return inserted_barrier;
}
const new_resource = if (prev.stage.toInt() == 0) blk: {
prev.stage = .{ .top_of_pipe_bit = true };
break :blk true;
} else false;
const aspect = if (img.usage.depth_stencil_attachment_bit) vk.ImageAspectFlags{ .depth_bit = true } else vk.ImageAspectFlags{ .color_bit = true };
if (prev_queue != next_queue) {
// queue ownership transfer
var src_cmd = try Command.Buffer.get(prev.owner, device);
var dst_cmd = try Command.Buffer.get(self.owner, device);
if (src_cmd.queue_transfer_dst or dst_cmd.queue_transfer_src) {
try Command.Buffer.flushQueueTransfers(device);
}
const barrier: vk.ImageMemoryBarrier = .{
.s_type = .image_memory_barrier,
.src_access_mask = prev.access,
.dst_access_mask = self.access,
.old_layout = prev.layout,
.new_layout = self.layout,
.src_queue_family_index = prev_queue.family,
.dst_queue_family_index = next_queue.family,
.image = img.handle,
.subresource_range = .{
.aspect_mask = aspect,
.base_mip_level = 0,
.level_count = vk.REMAINING_MIP_LEVELS,
.base_array_layer = 0,
.layer_count = vk.REMAINING_ARRAY_LAYERS,
},
};
assert(!src_cmd.queue_transfer_dst);
src_cmd.queue_transfer_src = true;
device.dispatch.cmdPipelineBarrier(src_cmd.handle, prev.stage, self.stage, .{}, 0, undefined, 0, undefined, 1, @ptrCast([*]const vk.ImageMemoryBarrier, &barrier));
assert(!dst_cmd.queue_transfer_src);
dst_cmd.queue_transfer_dst = true;
device.dispatch.cmdPipelineBarrier(dst_cmd.handle, prev.stage, self.stage, .{}, 0, undefined, 0, undefined, 1, @ptrCast([*]const vk.ImageMemoryBarrier, &barrier));
prev.* = self.*;
inserted_barrier = true;
} else {
const layout_change = prev.layout != self.layout;
const src_read = prev.access.intersect(Command.read_access).toInt() != 0;
const src_write = prev.access.intersect(Command.write_access).toInt() != 0;
const dst_read = self.access.intersect(Command.read_access).toInt() != 0;
const dst_write = self.access.intersect(Command.write_access).toInt() != 0;
const read_after_write = src_write and dst_read;
const write_after_read = src_read and dst_write;
const write_after_write = src_write and dst_write;
if (layout_change or read_after_write or write_after_read or write_after_write or new_resource) {
// data hazard, insert barrier
const cmd = try Command.Buffer.get(self.owner, device);
const barrier: vk.ImageMemoryBarrier = .{
.s_type = .image_memory_barrier,
.src_access_mask = prev.access,
.dst_access_mask = self.access,
.old_layout = prev.layout,
.new_layout = self.layout,
.src_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
.dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
.image = img.handle,
.subresource_range = .{
.aspect_mask = aspect,
.base_mip_level = 0,
.level_count = vk.REMAINING_MIP_LEVELS,
.base_array_layer = 0,
.layer_count = vk.REMAINING_ARRAY_LAYERS,
},
};
device.dispatch.cmdPipelineBarrier(cmd.handle, prev.stage, self.stage, .{}, 0, undefined, 0, undefined, 1, @ptrCast([*]const vk.ImageMemoryBarrier, &barrier));
prev.* = self.*;
inserted_barrier = true;
} else {
// no data hazard, append usage state
prev.stage = prev.stage.merge(self.stage);
prev.access = prev.access.merge(self.access);
}
}
(try Command.Buffer.get(self.owner, device)).touchImage(img);
assert(img.state.layout == self.layout);
return inserted_barrier;
}
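// editor's summary of the hazard rules above: a barrier is emitted on a layout
// change, read-after-write, write-after-read, write-after-write, or first use;
// read-after-read in the same layout only widens prev.stage/prev.access.
// e.g. two consecutive fragSample() calls emit one barrier (first use), then none.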
};
const SubState = struct {
stage: vk.PipelineStageFlags,
access: vk.AccessFlags,
layout: vk.ImageLayout,
pub fn transferSrc(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.xfer);
const self = SubState{
.stage = .{ .transfer_bit = true },
.access = .{ .transfer_read_bit = true },
.layout = .transfer_src_optimal,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn transferDst(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.xfer);
const self = SubState{
.stage = .{ .transfer_bit = true },
.access = .{ .transfer_write_bit = true },
.layout = .transfer_dst_optimal,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn fragSample(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.gfx);
const self = SubState{
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .shader_read_only_optimal,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn computeSample(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.comp);
const self = SubState{
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .shader_read_only_optimal,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn fragLoad(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.gfx);
const self = SubState{
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .general,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn fragStore(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.gfx);
const self = SubState{
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_write_bit = true },
.layout = .general,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn fragLoadStore(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.gfx);
const self = SubState{
.stage = .{ .fragment_shader_bit = true },
.access = .{ .shader_read_bit = true, .shader_write_bit = true },
.layout = .general,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn computeLoad(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.comp);
const self = SubState{
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_read_bit = true },
.layout = .general,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn computeStore(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.comp);
const self = SubState{
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_write_bit = true },
.layout = .general,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn computeLoadStore(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.comp);
const self = SubState{
.stage = .{ .compute_shader_bit = true },
.access = .{ .shader_read_bit = true, .shader_write_bit = true },
.layout = .general,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn colorAttachWrite(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.gfx);
const self = SubState{
.stage = .{ .color_attachment_output_bit = true },
.access = .{ .color_attachment_write_bit = true },
.layout = .color_attachment_optimal,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn depthAttachWrite(device: *const Device, buf: *Command.Buffer, img: *Image, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
assert(buf.gfx);
const self = SubState{
.stage = .{ .late_fragment_tests_bit = true },
.access = .{ .depth_stencil_attachment_write_bit = true },
.layout = .depth_stencil_attachment_optimal,
};
return try handle(device, buf, img, &self, base_layer, layer_count, base_mip, mip_count);
}
pub fn handle(device: *const Device, buf: *Command.Buffer, img: *Image, next: *const SubState, base_layer: usize, layer_count: usize, base_mip: usize, mip_count: usize) !bool {
{
assert(img.handle != .null_handle);
assert(next.access.toInt() != 0);
assert(next.stage.toInt() != 0);
assert(layer_count > 0);
assert(mip_count > 0);
assert(buf == (try Command.Buffer.get(img.state.owner, device)));
assert(buf.handle != .null_handle);
if (builtin.mode == .Debug or builtin.mode == .ReleaseSafe) {
const queue = queues.get(buf.queue_id);
assert(next.stage.intersect(queue.stage_mask).toInt() == next.stage.toInt());
assert(next.access.intersect(queue.access_mask).toInt() == next.access.toInt());
}
}
var inserted_barrier = false;
const L = img.array_layers;
const M = img.mip_levels;
assert(base_layer + layer_count <= L);
assert(base_mip + mip_count <= M);
const substate_count = L * M;
var prev = &img.state;
var substates: []SubState = undefined;
if (prev.substates) |ss| {
assert(prev.layout == .@"undefined");
assert(prev.access.toInt() == 0);
assert(prev.stage.toInt() == 0);
substates = ss;
} else {
substates = try allocator.alloc(SubState, substate_count);
prev.substates = substates;
if (prev.stage.toInt() == 0) {
prev.stage = .{ .top_of_pipe_bit = true };
}
for (substates) |*ss| {
ss.stage = prev.stage;
ss.access = prev.access;
ss.layout = prev.layout;
}
}
const aspect = if (img.usage.depth_stencil_attachment_bit) vk.ImageAspectFlags{ .depth_bit = true } else vk.ImageAspectFlags{ .color_bit = true };
const max_barriers = layer_count * mip_count;
const barriers = try allocator.alloc(vk.ImageMemoryBarrier, max_barriers);
defer allocator.free(barriers);
prev.layout = .@"undefined";
prev.access = .{};
prev.stage = .{};
var b: usize = 0;
var l: usize = 0;
while (l < layer_count) : (l += 1) {
const i_layer = base_layer + l;
var m: usize = 0;
while (m < mip_count) : (m += 1) {
const i_mip = base_mip + m;
const prev_sub = &substates[i_layer * M + i_mip];
prev.stage = prev.stage.merge(prev_sub.stage);
prev.access = prev.access.merge(prev_sub.access);
const layout_change = prev_sub.layout != next.layout;
const src_read = prev_sub.access.intersect(Command.read_access).toInt() != 0;
const src_write = prev_sub.access.intersect(Command.write_access).toInt() != 0;
const dst_read = next.access.intersect(Command.read_access).toInt() != 0;
const dst_write = next.access.intersect(Command.write_access).toInt() != 0;
const read_after_write = src_write and dst_read;
const write_after_read = src_read and dst_write;
const write_after_write = src_write and dst_write;
if (layout_change or read_after_write or write_after_read or write_after_write) {
assert(b < max_barriers);
const barrier = &barriers[b];
b += 1;
barrier.* = .{
.s_type = .image_memory_barrier,
.src_access_mask = prev_sub.access,
.dst_access_mask = next.access,
.old_layout = prev_sub.layout,
.new_layout = next.layout,
.src_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
.dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
.image = img.handle,
.subresource_range = .{
.aspect_mask = aspect,
.base_mip_level = @intCast(u32, i_mip),
.level_count = 1,
.base_array_layer = @intCast(u32, i_layer),
.layer_count = 1,
},
};
prev_sub.* = next.*;
} else {
prev_sub.access = prev_sub.access.merge(next.access);
prev_sub.stage = prev_sub.stage.merge(next.stage);
}
}
}
// merge mips
var i: usize = 0;
while ((i + 1) < b) {
const dst = &barriers[i];
const src = &barriers[i + 1];
const dst_range = &dst.subresource_range;
const src_range = &src.subresource_range;
if (dst.old_layout != src.old_layout or
dst_range.base_array_layer != src_range.base_array_layer)
{
i += 1;
continue;
}
if (dst_range.base_mip_level + dst_range.level_count == src_range.base_mip_level) {
dst_range.level_count += src_range.level_count;
dst.src_access_mask = dst.src_access_mask.merge(src.src_access_mask);
assert(dst_range.level_count <= img.mip_levels);
var j: usize = i + 1;
while ((j + 1) < b) : (j += 1) {
barriers[j] = barriers[j + 1];
}
b -= 1;
assert(b >= 1);
// stay at the same i to re-check the merged barrier against its new neighbor
} else {
i += 1;
}
}
// merge layers
i = 0;
while ((i + 1) < b) {
const dst = &barriers[i];
const src = &barriers[i + 1];
const dst_range = &dst.subresource_range;
const src_range = &src.subresource_range;
if (dst.old_layout != src.old_layout or
dst_range.base_mip_level != src_range.base_mip_level or
dst_range.level_count != src_range.level_count)
{
i += 1;
continue;
}
if (dst_range.base_array_layer + dst_range.layer_count == src_range.base_array_layer) {
dst_range.layer_count += src_range.layer_count;
dst.src_access_mask = dst.src_access_mask.merge(src.src_access_mask);
assert(dst_range.layer_count <= img.array_layers);
var j: usize = i + 1;
while ((j + 1) < b) : (j += 1) {
barriers[j] = barriers[j + 1];
}
b -= 1;
assert(b >= 1);
// stay at the same i to re-check the merged barrier against its new neighbor
} else {
i += 1;
}
}
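// editor's note: e.g. four per-mip barriers for layer 0, mips 0..3, with the same
// old layout collapse into one barrier with level_count = 4; the layer pass then
// folds matching mip ranges of adjacent layers into a single subresource_range.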
if (b > 0) {
device.dispatch.cmdPipelineBarrier(buf.handle, prev.stage, next.stage, .{}, 0, undefined, 0, undefined, @intCast(u32, b), barriers.ptr);
inserted_barrier = true;
}
prev.access = substates[0].access;
prev.stage = substates[0].stage;
prev.layout = substates[0].layout;
var is_uniform = true;
i = 1;
while (i < substate_count) : (i += 1) {
if (substates[0].layout != substates[i].layout) {
is_uniform = false;
break;
}
prev.access = prev.access.merge(substates[i].access);
prev.stage = prev.stage.merge(substates[i].stage);
}
if (is_uniform) {
prev.substates = null;
allocator.free(substates);
} else {
prev.layout = .@"undefined";
prev.access = .{};
prev.stage = .{};
}
buf.touchImage(img);
return inserted_barrier;
}
};
const Set = struct {
frames: [settings.resource_sets]Image,
pub fn init(info: *const vk.ImageCreateInfo, mem_usage: vma.MemUsage) !Set {
var self = Set{ .frames = std.mem.zeroes([settings.resource_sets]Image) };
errdefer self.deinit();
for (self.frames) |*frame| {
try frame.init(info, mem_usage);
}
return self;
}
pub fn deinit(self: *Set) void {
for (self.frames) |*frame| {
frame.deinit();
}
}
pub fn reserve(self: *Set, swapchain: *Swapchain, info: *const vk.ImageCreateInfo, mem_usage: memory.Usage) !void {
try self.current(swapchain).reserve(info, mem_usage);
}
pub fn current(self: *Set, swapchain: *Swapchain) *Image {
assert(swapchain.sync_index < self.frames.len);
return &self.frames[swapchain.sync_index];
}
pub fn prev(self: *Set, swapchain: *Swapchain) *Image {
const prev_index = (swapchain.sync_index + (settings.resource_sets - 1)) % settings.resource_sets;
assert(prev_index < self.frames.len);
return &self.frames[prev_index];
}
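// editor's note: adding (resource_sets - 1) instead of subtracting 1 avoids
// unsigned underflow at sync_index == 0; with resource_sets == 3 this maps
// sync_index 0 -> 2, 1 -> 0, 2 -> 1.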
};
};
pub fn infoToViewInfo(info: *const vk.ImageCreateInfo) !vk.ImageViewCreateInfo {

View File

@ -4,7 +4,7 @@ const framebuffer = @import("framebuffer.zig");
const Base = @import("Base.zig");
const Device = @import("device.zig").Device;
const Instance = @import("instance.zig").Instance;
const SubmitId = @import("swapchain.zig").SubmitId;
const SubmitId = @import("submit_id.zig").SubmitId;
const settings = @import("settings.zig");
const queues = @import("queues.zig");
const vma = @import("vma");
@ -236,12 +236,17 @@ const ReleasableUnion = union(ReleasableType) {
attachment: vk.ImageView,
};
const Releasable = struct {
pub const Releasable = struct {
const Self = @This();
submit_id: SubmitId,
object: ReleasableUnion,
pub fn init(self: *const Self) void {
// TODO
_ = self;
}
pub fn deinit(self: *const Self, device: *const Device, allocator: Allocator) void {
switch (self.object) {
.buffer => |buffer| {

View File

@ -272,7 +272,7 @@ pub const Queue = struct {
index: QueueId = .graphics,
access_mask: vk.AccessFlags = .{},
stage_mask: vk.PipelineStageFlags = .{},
queue_id: u4 = 0,
queue_id: QueueId = .graphics,
gfx: bool = false,
comp: bool = false,
xfer: bool = false,

View File

@ -0,0 +1,73 @@
const std = @import("std");
const assert = std.debug.assert;
const Device = @import("device.zig").Device;
const queues = @import("queues.zig");
const profiler = @import("../../common/profiler.zig");
pub const SubmitId = packed struct {
counter: u32 = 0,
queue_id: queues.QueueId = .graphics,
valid: bool = false,
const pm_poll = profiler.ProfileMark.init("SubmitId.poll");
pub fn poll(self: *const SubmitId, device: *const Device) !bool {
pm_poll.begin();
defer pm_poll.end();
assert(self.valid);
const queue = queues.get(self.queue_id);
const ring_mask = queue.cmds.len - 1;
while (self.counter -% queue.tail < queue.cmds.len) : (queue.tail += 1) {
const i = queue.tail & ring_mask;
if ((try queue.cmd_fences[i].stat(device)) != .signaled) {
return false;
}
try device.dispatch.resetCommandBuffer(queue.cmds[i], .{});
queue.cmd_ids[i] = 0;
}
return true;
}
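// editor's note: `counter -% tail < cmds.len` is the in-flight test on a
// monotonically increasing u32 ring; e.g. len = 16, tail = 3, counter = 5
// gives 2 (still pending), and it remains correct across wraparound:
// tail = 0xffff_fffe, counter = 1 gives 3.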
const pm_wait = profiler.ProfileMark.init("SubmitId.wait");
pub fn wait(self: *const SubmitId, device: *const Device) !void {
pm_wait.begin();
defer pm_wait.end();
std.debug.assert(self.valid);
const queue = queues.get(self.queue_id);
const ring_mask = queue.cmds.len - 1;
while ((self.counter -% queue.tail) < queue.cmds.len) : (queue.tail += 1) {
const i = queue.tail & ring_mask;
std.debug.assert(queue.cmd_ids[i] == queue.tail);
try queue.cmd_fences[i].wait(device);
device.dispatch.resetCommandBuffer(queue.cmds[i], .{}) catch {};
queue.cmd_ids[i] = 0;
}
}
const pm_waitall = profiler.ProfileMark.init("SubmitId.waitAll");
pub fn waitAll(device: *const Device) !void {
pm_waitall.begin();
defer pm_waitall.end();
var queueId: usize = 0;
while (queueId < queues.QueueId.count) : (queueId += 1) {
const queue = queues.get(@intToEnum(queues.QueueId, queueId));
const ring_mask = queue.cmds.len - 1;
while (queue.tail != queue.head) : (queue.tail += 1) {
const i = queue.tail & ring_mask;
assert(queue.cmd_ids[i] == queue.tail);
try queue.cmd_fences[i].wait(device);
device.dispatch.resetCommandBuffer(queue.cmds[i], .{}) catch {};
queue.cmd_ids[i] = 0;
}
}
}
};

View File

@ -10,6 +10,7 @@ const Window = @import("display.zig").Window;
const sync = @import("sync.zig");
const queues = @import("queues.zig");
const Image = @import("image.zig").Image;
const SubmitId = @import("submit_id.zig").SubmitId;
// TODO memory
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
@ -67,6 +68,7 @@ const Support = struct {
pub fn deinit(self: *const Self) void {
allocator.free(self.formats);
allocator.free(self.modes);
}
pub fn selectSwapFormat(self: *const Self) !vk.SurfaceFormatKHR {
@ -115,32 +117,6 @@ const Support = struct {
}
};
pub const SubmitId = packed struct {
counter: u32,
queue_id: queues.QueueId,
valid: bool,
const pm_wait = profiler.ProfileMark.init("swapchain.SubmitId.wait");
pub fn wait(self: *const SubmitId, device: *const Device) void {
pm_wait.begin();
defer pm_wait.end();
std.debug.assert(self.valid);
const queue = queues.get(self.queue_id);
const ring_mask = queue.cmds.len - 1;
while ((self.counter - queue.tail) < queue.cmds.len) : (queue.tail += 1) {
const i = queue.tail & ring_mask;
std.debug.assert(queue.cmd_ids[i] == queue.tail);
queue.cmd_fences[i].wait(device) catch {};
device.dispatch.resetCommandBuffer(queue.cmds[i], .{}) catch {};
queue.cmd_ids[i] = 0;
}
}
};
pub const Swapchain = struct {
const Self = @This();
@ -153,13 +129,13 @@ pub const Swapchain = struct {
length: u32,
image_index: usize = 0,
image_submits: [settings.max_swapchain_len]SubmitId = undefined,
images: [settings.max_swapchain_len]Image = undefined,
image_submits: [settings.max_swapchain_len]SubmitId = std.mem.zeroes([settings.max_swapchain_len]SubmitId),
images: [settings.max_swapchain_len]Image = std.mem.zeroes([settings.max_swapchain_len]Image),
sync_index: usize = 0,
sync_submits: [settings.resource_sets]SubmitId = undefined,
available_semas: [settings.resource_sets]sync.Semaphore = undefined,
rendered_semas: [settings.resource_sets]sync.Semaphore = undefined,
sync_submits: [settings.resource_sets]SubmitId = std.mem.zeroes([settings.resource_sets]SubmitId),
available_semas: [settings.resource_sets]sync.Semaphore = std.mem.zeroes([settings.resource_sets]sync.Semaphore),
rendered_semas: [settings.resource_sets]sync.Semaphore = std.mem.zeroes([settings.resource_sets]sync.Semaphore),
pub fn init(instance: *const Instance, window: *const Window, device: *const Device, previous: ?*Self) !Self {
const support = try Support.init(instance, device, window);
@ -168,8 +144,6 @@ pub const Swapchain = struct {
const queue_support = try queues.Support.init(instance, device.physical_device, window);
defer queue_support.deinit();
_ = previous;
const format = try support.selectSwapFormat();
const mode = try support.selectSwapMode();
const ext = support.selectSwapExtent(window);
@ -184,15 +158,13 @@ pub const Swapchain = struct {
const usage: vk.ImageUsageFlags = .{ .color_attachment_bit = true };
const image_format = .r5g6b5_unorm_pack16;
const swap_info = vk.SwapchainCreateInfoKHR{
.s_type = .swapchain_create_info_khr,
.flags = .{},
.surface = window.surface,
.present_mode = mode,
.min_image_count = img_count,
.image_format = image_format,
.image_format = format.format,
.image_color_space = format.color_space,
.image_extent = ext,
.image_array_layers = 1,
@ -214,14 +186,14 @@ pub const Swapchain = struct {
if (previous == null) {
std.debug.print("Present mode: '{s}'\n", .{@tagName(mode)});
std.debug.print("Present extent: '{} x {}'\n", .{ ext.width, ext.height });
std.debug.print("Present images: '{}'\n", .{img_count});
std.debug.print("Present extent: {} x {}\n", .{ ext.width, ext.height });
std.debug.print("Present images: {}\n", .{img_count});
std.debug.print("Present sharing mode: '{s}'\n", .{if (concurrent) @as([]const u8, "Concurrent") else "Exclusive"});
std.debug.print("Color space: '{s}'\n", .{@tagName(format.color_space)});
std.debug.print("Format: '{s}'\n", .{@tagName(image_format)});
std.debug.print("Format: '{s}'\n", .{@tagName(format.format)});
}
var images: [settings.max_swapchain_len]vk.Image = undefined;
var images: [settings.max_swapchain_len]vk.Image = std.mem.zeroes([settings.max_swapchain_len]vk.Image);
_ = try device.dispatch.getSwapchainImagesKHR(device.handle, handle, &img_count, null);
if (img_count > settings.max_swapchain_len) {
return error.TooManyImages;
@ -229,12 +201,10 @@ pub const Swapchain = struct {
_ = try device.dispatch.getSwapchainImagesKHR(device.handle, handle, &img_count, &images);
const color_format = .r5g6b5_unorm_pack16;
var self = Self{
.handle = handle,
.mode = mode,
.color_format = color_format,
.color_format = format.format,
.color_space = format.color_space,
.width = ext.width,
.height = ext.height,
@ -251,7 +221,7 @@ pub const Swapchain = struct {
.height = ext.height,
.depth = 1,
},
.format = color_format,
.format = format.format,
.mip_levels = 1,
.array_layers = 1,
.samples = .{ .@"1_bit" = true },
@ -289,14 +259,14 @@ pub const Swapchain = struct {
}
const pm_acquiresync = profiler.ProfileMark.init("swapchain.Swapchain.acquireSync");
pub fn acquireSync(self: *Self, device: *const Device) usize {
pub fn acquireSync(self: *Self, device: *const Device) !usize {
pm_acquiresync.begin();
defer pm_acquiresync.end();
const sync_index = (self.sync_index + 1) % self.sync_submits.len;
const sub = self.sync_submits[sync_index];
if (sub.valid) {
sub.wait(device);
try sub.wait(device);
}
self.sync_index = sync_index;
@ -313,7 +283,7 @@ pub const Swapchain = struct {
std.debug.assert(result.image_index < self.length);
const sub = self.image_submits[result.image_index];
if (sub.valid) {
sub.wait(device);
try sub.wait(device);
}
self.image_index = result.image_index;
@ -321,7 +291,7 @@ pub const Swapchain = struct {
}
const pm_submit = profiler.ProfileMark.init("swapchain.Swapchain.submit");
pub fn submit(self: *Self, command: *const Command.Buffer, device: *const Device) !void {
pub fn submit(self: *Self, command: *Command.Buffer, device: *const Device) !void {
pm_submit.begin();
defer pm_submit.end();
@ -330,9 +300,10 @@ pub const Swapchain = struct {
{
const backbuf = self.getBackBuffer();
const prev_use = backbuf.state.stage;
try backbuf.state.presentSrc(command);
//std.debug.print("PRESENT... swapchain {}\n\n{} \n", .{self, images});
_ = try Image.State.presentSrc(device, command, backbuf);
const submit_id = try command.submit(self.available_semas[sync_index], prev_use, self.rendered_semas[sync_index]);
const submit_id = try command.submit(device, self.available_semas[sync_index], prev_use, self.rendered_semas[sync_index]);
self.sync_submits[sync_index] = submit_id;
self.image_submits[image_index] = submit_id;
}

View File

@ -3,6 +3,7 @@ const assert = std.debug.assert;
const vk = @import("vulkan");
const Device = @import("device.zig").Device;
const DeviceDispatch = @import("device.zig").DeviceDispatch;
const Renderer = @import("Renderer.zig");
@ -37,6 +38,7 @@ pub const Fence = struct {
handle: vk.Fence,
pub const InitError = DeviceDispatch.CreateFenceError;
pub fn init(device: *const Device, signaled: bool) !Self {
const handle = try device.dispatch.createFence(device.handle, &.{ .s_type = .fence_create_info, .flags = .{
.signaled_bit = signaled,
@ -51,18 +53,20 @@ pub const Fence = struct {
device.dispatch.destroyFence(device.handle, self.handle, null);
}
pub fn reset(self: *const Self, device: *const Device) !void {
pub const ResetError = DeviceDispatch.ResetFencesError;
pub fn reset(self: *const Self, device: *const Device) ResetError!void {
try device.dispatch.resetFences(device.handle, 1, @ptrCast([*]const vk.Fence, &self.handle));
}
pub fn wait(self: *const Self, device: *const Device) !void {
pub const WaitError = DeviceDispatch.WaitForFencesError || DeviceDispatch.GetFenceStatusError;
pub fn wait(self: *const Self, device: *const Device) WaitError!void {
const timeout = std.math.maxInt(u64);
while ((try self.stat(device)) != .signaled) {
_ = try device.dispatch.waitForFences(device.handle, 1, @ptrCast([*]const vk.Fence, &self.handle), vk.FALSE, timeout);
}
}
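// editor's note: with a single fence the wait_all flag (vk.FALSE above) has no
// effect; looping on stat() simply re-checks in case waitForFences returns
// before the fence is observed signaled.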
pub fn stat(self: *const Self, device: *const Device) !State {
pub fn stat(self: *const Self, device: *const Device) DeviceDispatch.GetFenceStatusError!State {
return @intToEnum(State, @enumToInt(try device.dispatch.getFenceStatus(device.handle, self.handle)));
}
};