Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
118 changes: 118 additions & 0 deletions src/webgpu/p5.RendererWebGPU.js
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,124 @@ function rendererWebGPU(p5, fn) {
device.queue.writeBuffer(this.buffer, 0, floatData);
}
}

/**
* Reads data from a storage buffer back into JavaScript.
*
* Copies data from the GPU to the CPU using a temporary buffer,
* so it must be awaited. Returns a `Float32Array` for number
* buffers, or an array of plain objects for struct buffers.
*
* Note: This is a GPU -> CPU read, so calling it often (like every frame)
* can be slow.
*
* ```js example
* let data;
* let computeShader;
*
* async function setup() {
* await createCanvas(100, 100, WEBGPU);
*
* data = createStorage(new Float32Array([1, 2, 3, 4]));
* computeShader = buildComputeShader(doubleValues);
* compute(computeShader, 4);
*
* let result = await data.read();
* // result is Float32Array [2, 4, 6, 8]
* for (let i = 0; i < result.length; i++) {
* print(result[i]);
* }
* describe('Prints the values 2, 4, 6, 8 to the console.');
* }
*
* function doubleValues() {
* let d = uniformStorage(data);
* let idx = index.x;
* d[idx] = d[idx] * 2;
* }
* ```
*
* @method read
* @for p5.StorageBuffer
* @beta
* @webgpu
* @webgpuOnly
* @returns {Promise<Float32Array|Object[]>}
*/
async read() {
const device = this._renderer.device;
this._renderer.flushDraw();

const stagingBuffer = device.createBuffer({
size: this.size,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
});

const commandEncoder = device.createCommandEncoder();
commandEncoder.copyBufferToBuffer(this.buffer, 0, stagingBuffer, 0, this.size);
device.queue.submit([commandEncoder.finish()]);

await stagingBuffer.mapAsync(GPUMapMode.READ, 0, this.size);
const mappedRange = stagingBuffer.getMappedRange(0, this.size);

// Copy before unmapping because mapped memory becomes invalid after unmap
const rawCopy = new Float32Array(mappedRange.byteLength / 4);
rawCopy.set(new Float32Array(mappedRange));

stagingBuffer.unmap();
stagingBuffer.destroy();

if (this._schema !== null) {
return this._unpackStructArray(rawCopy, this._schema);
}
return rawCopy;
}

// Inverse of _packStructArray reads packed buffer back into plain JS objects
// using the same schema layout - fields, stride and offsets
_unpackStructArray(floatView, schema) {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you think we can move this to the renderer next to _packStructArray for consistency, and defer to the renderer from this class?

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, that makes sense. I will move _unpackStructArray next to _packStructArray in the renderer and call it from here for consistency.

const { fields, stride } = schema;
const dataView = new DataView(floatView.buffer);
const count = Math.floor(floatView.byteLength / stride);
const result = [];

for (let i = 0; i < count; i++) {
const item = {};
const baseOffset = i * stride;
for (const field of fields) {
const byteOffset = baseOffset + field.offset;
const n = field.size / 4;

if (field.baseType === 'u32') {
if (n === 1) {
item[field.name] = dataView.getUint32(byteOffset, true);
} else {
item[field.name] = Array.from({ length: n }, (_, j) =>
dataView.getUint32(byteOffset + j * 4, true)
);
}
} else if (field.baseType === 'i32') {
if (n === 1) {
item[field.name] = dataView.getInt32(byteOffset, true);
} else {
item[field.name] = Array.from({ length: n }, (_, j) =>
dataView.getInt32(byteOffset + j * 4, true)
);
}
} else {
const idx = byteOffset / 4;
if (n === 1) {
item[field.name] = floatView[idx];
} else {
item[field.name] = Array.from(floatView.slice(idx, idx + n));
}
}
}
result.push(item);
}

return result;
}
}

/**
Expand Down
85 changes: 85 additions & 0 deletions test/unit/webgpu/p5.RendererWebGPU.js
Original file line number Diff line number Diff line change
Expand Up @@ -160,4 +160,89 @@ suite('WebGPU p5.RendererWebGPU', function() {
expect(myp5._renderer).to.exist;
});
});

suite('StorageBuffer.read()', function() {
  test('reads back float array data', async function() {
    const input = new Float32Array([1, 2, 3, 4]);
    const buf = myp5.createStorage(input);

    const result = await buf.read();

    expect(result).to.be.instanceOf(Float32Array);
    expect(result.length).to.equal(input.length);
    for (let i = 0; i < input.length; i++) {
      expect(result[i]).to.be.closeTo(input[i], 0.001);
    }
  });

  test('reads back struct array data', async function() {
    const input = [
      { x: 1.0, y: 2.0 },
      { x: 3.0, y: 4.0 },
    ];
    const buf = myp5.createStorage(input);

    const result = await buf.read();

    expect(result).to.be.an('array');
    expect(result.length).to.equal(input.length);
    for (let i = 0; i < input.length; i++) {
      expect(result[i].x).to.be.closeTo(input[i].x, 0.001);
      expect(result[i].y).to.be.closeTo(input[i].y, 0.001);
    }
  });

  test('read after update returns new data', async function() {
    const buf = myp5.createStorage(new Float32Array([10, 20, 30]));
    const updated = new Float32Array([100, 200, 300]);
    buf.update(updated);

    const result = await buf.read();

    for (let i = 0; i < updated.length; i++) {
      expect(result[i]).to.be.closeTo(updated[i], 0.001);
    }
  });

  test('reads back struct with vector fields', async function() {
    const input = [
      { position: myp5.createVector(1, 2), speed: 5.0 },
      { position: myp5.createVector(3, 4), speed: 10.0 },
    ];
    const buf = myp5.createStorage(input);

    const result = await buf.read();

    expect(result).to.be.an('array');
    expect(result.length).to.equal(2);
    // Vector fields come back as plain arrays
    // NOTE(review): per PR discussion, preserving original types
    // (p5.Vector / p5.Color) on read-back is being explored; update these
    // assertions if the return format changes.
    expect(result[0].position[0]).to.be.closeTo(1, 0.001);
    expect(result[0].position[1]).to.be.closeTo(2, 0.001);
    expect(result[0].speed).to.be.closeTo(5.0, 0.001);
    expect(result[1].position[0]).to.be.closeTo(3, 0.001);
    expect(result[1].position[1]).to.be.closeTo(4, 0.001);
    expect(result[1].speed).to.be.closeTo(10.0, 0.001);
  });

  test('reads back data modified by a compute shader', async function() {
    const input = new Float32Array([1, 2, 3, 4]);
    const buf = myp5.createStorage(input);

    const computeShader = myp5.buildComputeShader(() => {
      const d = myp5.uniformStorage();
      const idx = myp5.index.x;
      d[idx] = d[idx] * 2;
    }, { myp5 });

    computeShader.setUniform('d', buf);
    myp5.compute(computeShader, 4);

    const result = await buf.read();

    expect(result).to.be.instanceOf(Float32Array);
    for (let i = 0; i < input.length; i++) {
      expect(result[i]).to.be.closeTo(input[i] * 2, 0.001);
    }
  });
});
});
Loading