Question
I’m trying to implement GPU picking with Points, using code I modified from the latter half of this article: https://threejsfundamentals.org/threejs/lessons/threejs-picking.html
It’s been working fine for me on desktop, but when I started testing different browsers and devices I found it doesn’t work consistently. I made a Codepen to illustrate the problem: https://codepen.io/deklanw/pen/OJVVmEd?editors=1111
body {
  margin: 0;
}
#c {
  width: 100vw;
  height: 100vh;
  display: block;
}
<canvas id="c"></canvas>
<script type="module">
// Three.js - Picking - GPU picking with Points
// from https://threejsfundamentals.org/threejs/threejs-picking-gpu.html
import * as THREE from "https://threejsfundamentals.org/threejs/resources/threejs/r113/build/three.module.js";

function main() {
  const canvas = document.querySelector("#c");
  const renderer = new THREE.WebGLRenderer({ canvas });

  const fov = 60;
  const aspect = 2; // the canvas default
  const near = 0.1;
  const far = 200;
  const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
  camera.position.z = 30;

  const scene = new THREE.Scene();
  scene.background = new THREE.Color(0);
  const pickingScene = new THREE.Scene();
  pickingScene.background = new THREE.Color(0);

  // put the camera on a pole (parent it to an object)
  // so we can spin the pole to move the camera around the scene
  const cameraPole = new THREE.Object3D();
  scene.add(cameraPole);
  cameraPole.add(camera);

  function randomNormalizedColor() {
    return Math.random();
  }

  function getRandomInt(n) {
    return Math.floor(Math.random() * n);
  }

  function getCanvasRelativePosition(e) {
    const rect = canvas.getBoundingClientRect();
    return {
      x: e.clientX - rect.left,
      y: e.clientY - rect.top
    };
  }

  const textureLoader = new THREE.TextureLoader();
  const particleTexture =
    "https://raw.githubusercontent.com/mrdoob/three.js/master/examples/textures/sprites/ball.png";

  const vertexShader = `
    attribute float size;
    attribute vec3 customColor;
    varying vec3 vColor;
    void main() {
      vColor = customColor;
      vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
      gl_PointSize = size * ( 100.0 / length( mvPosition.xyz ) );
      gl_Position = projectionMatrix * mvPosition;
    }
  `;

  const fragmentShader = `
    uniform sampler2D texture;
    varying vec3 vColor;
    void main() {
      vec4 tColor = texture2D( texture, gl_PointCoord );
      if (tColor.a < 0.5) discard;
      gl_FragColor = mix( vec4( vColor.rgb, 1.0 ), tColor, 0.1 );
    }
  `;

  const pickFragmentShader = `
    uniform sampler2D texture;
    varying vec3 vColor;
    void main() {
      vec4 tColor = texture2D( texture, gl_PointCoord );
      if (tColor.a < 0.25) discard;
      gl_FragColor = vec4( vColor.rgb, 1.0 );
    }
  `;

  const materialSettings = {
    uniforms: {
      texture: {
        type: "t",
        value: textureLoader.load(particleTexture)
      }
    },
    vertexShader: vertexShader,
    fragmentShader: fragmentShader,
    blending: THREE.NormalBlending,
    depthTest: true,
    transparent: false
  };

  const createParticleMaterial = () => {
    const material = new THREE.ShaderMaterial(materialSettings);
    return material;
  };

  const createPickingMaterial = () => {
    const material = new THREE.ShaderMaterial({
      ...materialSettings,
      fragmentShader: pickFragmentShader,
      blending: THREE.NormalBlending
    });
    return material;
  };

  const geometry = new THREE.BufferGeometry();
  const pickingGeometry = new THREE.BufferGeometry();
  const colors = [];
  const sizes = [];
  const pickingColors = [];
  const pickingColor = new THREE.Color();
  const positions = [];

  for (let i = 0; i < 30; i++) {
    colors[3 * i] = randomNormalizedColor();
    colors[3 * i + 1] = randomNormalizedColor();
    colors[3 * i + 2] = randomNormalizedColor();

    const rgbPickingColor = pickingColor.setHex(i + 1);
    pickingColors[3 * i] = rgbPickingColor.r;
    pickingColors[3 * i + 1] = rgbPickingColor.g;
    pickingColors[3 * i + 2] = rgbPickingColor.b;

    sizes[i] = getRandomInt(20);

    positions[3 * i] = getRandomInt(20);
    positions[3 * i + 1] = getRandomInt(20);
    positions[3 * i + 2] = getRandomInt(20);
  }

  geometry.setAttribute(
    "position",
    new THREE.Float32BufferAttribute(positions, 3)
  );
  geometry.setAttribute(
    "customColor",
    new THREE.Float32BufferAttribute(colors, 3)
  );
  geometry.setAttribute("size", new THREE.Float32BufferAttribute(sizes, 1));
  geometry.computeBoundingBox();

  const material = createParticleMaterial();
  const points = new THREE.Points(geometry, material);

  // setup geometry and material for GPU picking
  pickingGeometry.setAttribute(
    "position",
    new THREE.Float32BufferAttribute(positions, 3)
  );
  pickingGeometry.setAttribute(
    "customColor",
    new THREE.Float32BufferAttribute(pickingColors, 3)
  );
  pickingGeometry.setAttribute(
    "size",
    new THREE.Float32BufferAttribute(sizes, 1)
  );
  pickingGeometry.computeBoundingBox();

  const pickingMaterial = createPickingMaterial();
  const pickingPoints = new THREE.Points(pickingGeometry, pickingMaterial);

  scene.add(points);
  pickingScene.add(pickingPoints);

  function resizeRendererToDisplaySize(renderer) {
    const canvas = renderer.domElement;
    const width = canvas.clientWidth;
    const height = canvas.clientHeight;
    const needResize = canvas.width !== width || canvas.height !== height;
    if (needResize) {
      renderer.setSize(width, height, false);
    }
    return needResize;
  }

  class GPUPickHelper {
    constructor() {
      // create a 1x1 pixel render target
      this.pickingTexture = new THREE.WebGLRenderTarget(1, 1);
      this.pixelBuffer = new Uint8Array(4);
    }

    pick(cssPosition, pickingScene, camera) {
      const { pickingTexture, pixelBuffer } = this;

      // set the view offset to represent just a single pixel under the mouse
      const pixelRatio = renderer.getPixelRatio();
      camera.setViewOffset(
        renderer.getContext().drawingBufferWidth, // full width
        renderer.getContext().drawingBufferHeight, // full height
        (cssPosition.x * pixelRatio) | 0, // rect x
        (cssPosition.y * pixelRatio) | 0, // rect y
        1, // rect width
        1 // rect height
      );
      // render the scene
      renderer.setRenderTarget(pickingTexture);
      renderer.render(pickingScene, camera);
      renderer.setRenderTarget(null);
      // clear the view offset so rendering returns to normal
      camera.clearViewOffset();
      // read the pixel
      renderer.readRenderTargetPixels(
        pickingTexture,
        0, // x
        0, // y
        1, // width
        1, // height
        pixelBuffer
      );

      const id =
        (pixelBuffer[0] << 16) | (pixelBuffer[1] << 8) | pixelBuffer[2];
      console.log(`You clicked point number ${id}`);
      return id;
    }
  }

  const pickHelper = new GPUPickHelper();

  function render(time) {
    time *= 0.001; // convert to seconds

    if (resizeRendererToDisplaySize(renderer)) {
      const canvas = renderer.domElement;
      camera.aspect = canvas.clientWidth / canvas.clientHeight;
      camera.updateProjectionMatrix();
    }

    cameraPole.rotation.y = time * 0.1;

    renderer.render(scene, camera);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);

  function onClick(e) {
    const pickPosition = getCanvasRelativePosition(e);
    const pickedID = pickHelper.pick(pickPosition, pickingScene, camera);
  }

  function onTouch(e) {
    const touch = e.touches[0];
    const pickPosition = getCanvasRelativePosition(touch);
    const pickedID = pickHelper.pick(pickPosition, pickingScene, camera);
  }

  window.addEventListener("mousedown", onClick);
  window.addEventListener("touchstart", onTouch);
}
main();
</script>
If you click (or tap) on the nodes, their IDs should pop up in the console. On some devices I’m just getting 0, as if I’m picking the background.
Anyone know why?
Also, if there’s an easier way to do picking in this case (a Points mesh with variable-size points via ShaderMaterial) that’s still performant, I’d be curious to hear about it.
EDIT:
I removed the 1x1 render target optimization and that seems to have fixed it. Now I’d like to know what it is about that optimization that causes the problem.
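For reference, a minimal sketch of that workaround, assuming the rest of the setup above stays the same (the class name and the explicit renderer parameter are illustrative, not from the original Codepen): keep the picking render target the same size as the drawing buffer, render the whole picking scene with no view offset, and read back only the pixel under the cursor (note the Y flip, since the render target’s origin is at the bottom left).

class FullSizeGPUPickHelper {
  constructor() {
    this.pickingTexture = new THREE.WebGLRenderTarget(1, 1);
    this.pixelBuffer = new Uint8Array(4);
  }
  pick(cssPosition, renderer, pickingScene, camera) {
    const { pickingTexture, pixelBuffer } = this;
    const gl = renderer.getContext();
    // keep the render target the same size as the drawing buffer
    if (
      pickingTexture.width !== gl.drawingBufferWidth ||
      pickingTexture.height !== gl.drawingBufferHeight
    ) {
      pickingTexture.setSize(gl.drawingBufferWidth, gl.drawingBufferHeight);
    }
    // render the entire picking scene (no camera.setViewOffset)
    renderer.setRenderTarget(pickingTexture);
    renderer.render(pickingScene, camera);
    renderer.setRenderTarget(null);
    // read back only the pixel under the cursor; flip Y because the
    // render target's origin is at the bottom left
    const pixelRatio = renderer.getPixelRatio();
    const x = (cssPosition.x * pixelRatio) | 0;
    const y = gl.drawingBufferHeight - ((cssPosition.y * pixelRatio) | 0) - 1;
    renderer.readRenderTargetPixels(pickingTexture, x, y, 1, 1, pixelBuffer);
    return (pixelBuffer[0] << 16) | (pixelBuffer[1] << 8) | pixelBuffer[2];
  }
}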
Answer 1:
The problem is that you can’t use Points this way across devices.
Whether or not a point is drawn when its center is offscreen is device dependent (the OpenGL ES / WebGL spec says it’s still supposed to be drawn, while the OpenGL spec says it’s not; there are no conformance tests for it, so each driver behaves differently), and it would be too much work for WebGL implementations to work around, so they don’t. AFAIK Intel and NVIDIA do draw them; AMD and PowerVR-based (iPhone) GPUs do not.
You can see this problem if you make the circles large and make sure they go offscreen (you may also need to make your canvas small). On some devices they will move smoothly offscreen; on others they will disappear as soon as their center goes offscreen (often depending on the size of the point and the size of the viewport).
This means your example does not really work in either case, with or without the 1x1-pixel render target. It’s just that with the 1x1-pixel render target, pretty much all of the circles have their centers outside that 1x1-pixel area, so on some devices they don’t get drawn at all. When you make the render target match the size of the canvas, most of the circles’ centers land inside it, but you’ll still get picking errors at the edges.
To solve this you’ll need to draw your points as quads instead of Points. There are many ways to do that: draw each quad as a separate mesh or sprite, merge all the quads into another mesh, use InstancedMesh with a matrix per point, or write custom shaders to draw quads as points (see the last example in this article). A rough sketch of the InstancedMesh route follows below.
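The sketch below reuses the positions, sizes, textureLoader, particleTexture, scene and camera variables from the question’s code; numPoints, the base quad size, and the pickAt helper are illustrative assumptions, not part of the original answer.

const numPoints = 30; // same count as the loop in the question's code
const quadGeometry = new THREE.PlaneBufferGeometry(1, 1);
const quadMaterial = new THREE.MeshBasicMaterial({
  map: textureLoader.load(particleTexture),
  transparent: true,
  alphaTest: 0.5
});
const instancedPoints = new THREE.InstancedMesh(quadGeometry, quadMaterial, numPoints);

const dummy = new THREE.Object3D();
for (let i = 0; i < numPoints; i++) {
  dummy.position.set(positions[3 * i], positions[3 * i + 1], positions[3 * i + 2]);
  dummy.scale.setScalar(sizes[i]); // quad edge length in world units
  dummy.updateMatrix();
  instancedPoints.setMatrixAt(i, dummy.matrix);
}
instancedPoints.instanceMatrix.needsUpdate = true;
scene.add(instancedPoints);

// Because each point is now real geometry, a plain Raycaster already reports
// which instance was hit, so GPU picking isn't strictly required anymore.
const raycaster = new THREE.Raycaster();
function pickAt(ndcX, ndcY) {
  raycaster.setFromCamera({ x: ndcX, y: ndcY }, camera);
  const hit = raycaster.intersectObject(instancedPoints)[0];
  return hit ? hit.instanceId : -1; // -1 means nothing was picked
}

One thing to keep in mind: unlike Points, these quads don’t billboard automatically, so if you want them to always face the camera you’d either rebuild the instance matrices from the camera’s quaternion each frame or handle the orientation in a custom vertex shader.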
Note that points have other issues too. By default they don’t scale relative to the canvas size (of course you can fix this in your shader, and three.js has an option for it as well). They also have a device-dependent maximum size, which according to the spec can be as low as 1 pixel. They don’t respond to the device pixel ratio setting either (though you could fix that in code as well). For all those reasons points have limited usefulness, and the large circles the code is drawing are arguably beyond that limit.
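As a sketch of the canvas-size / pixel-ratio fix mentioned above (the heightScale uniform name and the 0.1 base factor are arbitrary choices, not three.js API): pass the drawing-buffer height in as a uniform, which already includes the device pixel ratio, and use it in place of the hard-coded 100.0 in the question’s vertex shader.

const sizeAwareVertexShader = `
  attribute float size;
  attribute vec3 customColor;
  uniform float heightScale; // set from JavaScript on every resize
  varying vec3 vColor;
  void main() {
    vColor = customColor;
    vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
    // point size now tracks the canvas resolution instead of a fixed 100.0
    gl_PointSize = size * ( heightScale / length( mvPosition.xyz ) );
    gl_Position = projectionMatrix * mvPosition;
  }
`;

// heightScale must also be declared in the ShaderMaterial's uniforms,
// e.g. uniforms: { ..., heightScale: { value: 1 } }
function updateHeightScale(material, renderer) {
  // drawingBufferHeight already has the device pixel ratio folded in
  const height = renderer.getContext().drawingBufferHeight;
  material.uniforms.heightScale.value = height * 0.1; // 0.1 is an arbitrary base factor
}

Calling updateHeightScale whenever the canvas is resized keeps the points at roughly the same on-screen proportion across devices, though each driver’s maximum point size still applies.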
Source: https://stackoverflow.com/questions/60176980/gpu-picking-inconsistent-across-devices