Commit 9f87182e authored by Noxim's avatar Noxim
Browse files

initial commit

parents
/target
Cargo.lock
[package]
name = "wgpu-mapping-tracking"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
wgpu = "0.12"
tokio = { version = "1", features = ["sync"] }
\ No newline at end of file
use std::{
borrow::Borrow,
future::Future,
ops::RangeBounds,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use tokio::sync::Mutex;
use wgpu::{Buffer, BufferAddress, BufferAsyncError, BufferView, MapMode};
/// A mapping-aware buffer: wraps a buffer handle (anything that borrows a
/// [`wgpu::Buffer`]) together with the shared synchronisation state that
/// [`MAQueue`](crate::MAQueue) uses to coordinate mapping with submission.
pub struct MABuffer<B = Buffer> {
    // Synchronisation state shared with the owning queue's tracking list.
    pub(crate) inner: Arc<MABInner>,
    // The wrapped buffer handle itself.
    pub(crate) buffer: B,
}
/// Shared synchronisation state for one mapping-aware buffer.
pub(crate) struct MABInner {
    // Set when the buffer handle is accessed (`MABuffer::access`) since the
    // last submit; cleared by `MAQueue::submit`, which must then wait on
    // `lock` before submitting.
    pub(crate) accessed: AtomicBool,
    // Held while the buffer is mapped (`MABuffer::map_read`) or while a
    // submission that may use this buffer is executing (`MAQueue::submit`).
    pub(crate) lock: Mutex<()>,
}
impl MABInner {
    /// Fresh synchronisation state: not accessed since last submit, unlocked.
    pub(crate) fn new() -> MABInner {
        let accessed = AtomicBool::new(false);
        let lock = Mutex::new(());
        MABInner { accessed, lock }
    }
}
impl<B: Borrow<Buffer>> MABuffer<B> {
    /// Access the inner buffer handle, marking this buffer so that the next
    /// [`MAQueue::submit`](crate::MAQueue) waits for any outstanding mapping
    /// before executing.
    pub fn access(&self) -> &B {
        self.inner.accessed.store(true, Ordering::Release);
        &self.buffer
    }

    /// Has this buffer been accessed since the last submit?
    pub fn accessed(&self) -> bool {
        self.inner.accessed.load(Ordering::Acquire)
    }

    /// Map `bounds` of this buffer for reading and pass the mapped view to
    /// `f`, first waiting until any previous mapping or in-flight submission
    /// ends. The buffer is unmapped again before this future resolves.
    ///
    /// # Errors
    ///
    /// Returns [`BufferAsyncError`] if the device-side mapping fails.
    pub async fn map_read<F, O>(
        &self,
        bounds: impl RangeBounds<BufferAddress>,
        f: F,
    ) -> Result<O, BufferAsyncError>
    where
        // FnOnce instead of Fn: `f` is called exactly once, and every Fn
        // closure also implements FnOnce, so this strictly widens the set of
        // accepted closures (e.g. ones that move captured state) without
        // breaking existing callers.
        F: FnOnce(BufferView) -> O,
    {
        // Wait until a previous unmapping ends or an in-flight submit ends.
        self.locked(async {
            let buf = self.buffer.borrow();
            // Safe to map: we hold the exclusive lock, so no submit can run
            // concurrently.
            let slice = buf.slice(bounds);
            slice.map_async(MapMode::Read).await?;
            let range = slice.get_mapped_range();
            let out = f(range);
            // `range` was moved into `f` and has been dropped by now, so
            // unmapping is safe.
            buf.unmap();
            Ok(out)
        })
        .await
    }

    /// Is this buffer currently mapped (or locked by an in-flight submission)?
    pub fn is_mapped(&self) -> bool {
        self.inner.lock.try_lock().is_err()
    }

    // Run `fut` while holding this buffer's exclusive lock; the guard is
    // released only after `fut` completes.
    async fn locked<O>(&self, fut: impl Future<Output = O>) -> O {
        let guard = self.inner.lock.lock().await;
        let out = fut.await;
        drop(guard);
        out
    }
}
//! Mapping-aware wrappers around wgpu buffers and queues: a [`MAQueue`]
//! tracks the [`MABuffer`]s created through it, and its `submit` waits for
//! every accessed buffer to be unmapped before work is submitted.
mod buffer;
mod queue;

pub use buffer::MABuffer;
pub use queue::MAQueue;
use std::{
borrow::Borrow,
sync::{atomic::Ordering, Arc, Weak},
};
use tokio::sync::Mutex;
use wgpu::{CommandBuffer, Queue};
use crate::{buffer::MABInner, MABuffer};
/// A mapping-aware queue: wraps a queue handle (anything that borrows a
/// [`wgpu::Queue`]) and tracks the buffers created through it so that
/// `submit` can synchronise with their mapping state.
pub struct MAQueue<Q = Queue> {
    // The wrapped queue handle.
    queue: Q,
    // Weak handles to the shared state of every buffer created via
    // `create_mab`; dropped buffers are pruned on each `submit`.
    buffers: Mutex<Vec<Weak<MABInner>>>,
}
impl<Q: Borrow<Queue>> MAQueue<Q> {
/// Make the provided queue mapping aware
pub fn new(queue: Q) -> Self {
let buffers = Mutex::new(vec![]);
Self { queue, buffers }
}
pub fn inner(&self) -> &Q {
&self.queue
}
/// Make the provided buffer mapping aware
pub async fn create_mab<B>(&self, buffer: B) -> MABuffer<B> {
let inner = Arc::new(MABInner::new());
self.buffers.lock().await.push(Arc::downgrade(&inner));
MABuffer { inner, buffer }
}
/// Submit the command buffers to underlying queue, ensuring that all buffers created for this queue are properly synchronised
pub async fn submit<I: IntoIterator<Item = CommandBuffer>>(&self, command_buffers: I) {
let mut buffers = self.buffers.lock().await;
// Take all buffers which have not been dropped
let all = buffers
.iter()
.filter_map(|b| b.upgrade())
.collect::<Vec<_>>();
let mut guards = vec![];
// Lock all accessed buffers
for mab in &all {
// If this buffer has been accessed (potentially used in I), we must wait for it to unlock
if mab.accessed.swap(false, Ordering::Acquire) {
guards.push(mab.lock.lock().await);
}
}
let queue = self.queue.borrow();
// We are now safe to submit command buffers, knowing no used buffer is mapped
queue.submit(command_buffers);
// We must wait until all work is complete to ensure no race condition exists
queue.on_submitted_work_done().await;
// Execution complete, buffers may be mapped again
drop(guards);
// Store all buffers that were not dropped
*buffers = all.iter().map(Arc::downgrade).collect();
}
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment