Commit 9f87182e authored by Noxim's avatar Noxim
Browse files

initial commit

name = "wgpu-mapping-tracking"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
wgpu = "0.12"
tokio = { version = "1", features = ["sync"] }
\ No newline at end of file
use std::{
atomic::{AtomicBool, Ordering},
use tokio::sync::Mutex;
use wgpu::{Buffer, BufferAddress, BufferAsyncError, BufferView, MapMode};
/// A mapping-aware buffer: couples a (borrowable) wgpu `Buffer` with the
/// shared state its owning `MAQueue` uses to synchronise mapping and submits.
pub struct MABuffer<B = Buffer> {
// Shared with the MAQueue that created this buffer (see MAQueue::create_mab);
// tracks whether the buffer was accessed and whether it is mapped/locked.
pub(crate) inner: Arc<MABInner>,
// The wrapped buffer — anything that borrows as a wgpu::Buffer.
pub(crate) buffer: B,
// NOTE(review): the struct's closing `}` is missing from this capture — the
// source appears truncated; recover the exact text from version control.
// Shared synchronisation state for one mapping-aware buffer.
pub(crate) struct MABInner {
// Used since last submit. Waiting on the lock is required
// (apparently set with a Release store in access() and consumed with the
// Acquire swap in MAQueue::submit — confirm; access() is garbled in capture).
pub(crate) accessed: AtomicBool,
// Locked while mapped or in submit
pub(crate) lock: Mutex<()>,
// NOTE(review): the struct's closing `}` is missing from this capture.
impl MABInner {
pub(crate) fn new() -> MABInner {
MABInner {
accessed: AtomicBool::new(false),
lock: Mutex::new(()),
impl<B: Borrow<Buffer>> MABuffer<B> {
/// Access the inner type, while marking it for locking on next submit
pub fn access(&self) -> &B {, Ordering::Release);
/// Has this MAB been accessed since last submit
pub fn accessed(&self) -> bool {
/// Map this buffer for reading, potentially waiting until any previous command execution has finished
pub async fn map_read<F, O>(
bounds: impl RangeBounds<BufferAddress>,
f: F,
) -> Result<O, BufferAsyncError>
F: Fn(BufferView) -> O,
// Wait until previous unmapping ends or submit ends
self.locked(async {
let buf = self.buffer.borrow();
// We can safely map because we are in exclusive lock
let slice = buf.slice(bounds);
let range = slice.get_mapped_range();
let out = f(range);
// Remember to unmap as well. Range has gone out of scope and dropped
/// Is this buffer currently mapped (or locked for execution)
pub fn is_mapped(&self) -> bool {
// Lock this buffer for access
async fn locked<O>(&self, fut: impl Future<Output = O>) -> O {
let guard = self.inner.lock.lock().await;
let out = fut.await;
// Crate root (lib.rs): wires up the two modules and re-exports the API.
mod buffer;
mod queue;
// Public surface: the mapping-aware buffer and queue wrappers.
pub use buffer::MABuffer;
pub use queue::MAQueue;
use std::{
sync::{atomic::Ordering, Arc, Weak},
use tokio::sync::Mutex;
use wgpu::{CommandBuffer, Queue};
use crate::{buffer::MABInner, MABuffer};
/// A mapping-aware wrapper around a (borrowable) wgpu `Queue` that tracks
/// every buffer created through it so `submit` can synchronise with mappings.
pub struct MAQueue<Q = Queue> {
// The wrapped queue — anything that borrows as a wgpu::Queue.
queue: Q,
// Weak handles to the shared state of tracked buffers; weak so a dropped
// MABuffer falls out of tracking (the list is rebuilt in `submit`).
buffers: Mutex<Vec<Weak<MABInner>>>,
// NOTE(review): the struct's closing `}` is missing from this capture.
impl<Q: Borrow<Queue>> MAQueue<Q> {
/// Make the provided queue mapping aware
pub fn new(queue: Q) -> Self {
let buffers = Mutex::new(vec![]);
Self { queue, buffers }
pub fn inner(&self) -> &Q {
/// Make the provided buffer mapping aware
pub async fn create_mab<B>(&self, buffer: B) -> MABuffer<B> {
let inner = Arc::new(MABInner::new());
MABuffer { inner, buffer }
/// Submit the command buffers to underlying queue, ensuring that all buffers created for this queue are properly synchronised
// NOTE(review): several lines are missing from this capture — at least the
// iterator head and `.collect()` building `all`, the `guards.push(...)`
// that actually locks each accessed buffer, the `queue.submit(...)` call,
// and the wait-for-completion step the comments below describe. Recover
// the full body from version control before editing.
pub async fn submit<I: IntoIterator<Item = CommandBuffer>>(&self, command_buffers: I) {
// Holding the registry lock for the whole submit also excludes create_mab.
let mut buffers = self.buffers.lock().await;
// Take all buffers which have not been dropped
let all = buffers
.filter_map(|b| b.upgrade())
// Guards keep accessed buffers locked for the duration of the submit.
let mut guards = vec![];
// Lock all accessed buffers
for mab in &all {
// If this buffer has been accessed (potentially used in I), we must wait for it to unlock
// (the Acquire swap also resets the flag, pairing with access()'s Release store)
if mab.accessed.swap(false, Ordering::Acquire) {
let queue = self.queue.borrow();
// We are now safe to submit command buffers, knowing no used buffer is mapped
// We must wait until all work is complete to ensure no race condition exists
// Execution complete, buffers may be mapped again
// Store all buffers that were not dropped
*buffers = all.iter().map(Arc::downgrade).collect();
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment