Question
While trying to understand the many issues related to WebGL development for a generic mobile target, I now need to store depth information in a texture attachment for later retrieval and post-processing.
JavaScript:
var depthRB = gl.createRenderbuffer();
gl.bindRenderbuffer(gl.RENDERBUFFER, depthRB);
gl.renderbufferStorage(gl.RENDERBUFFER, gl.DEPTH_COMPONENT16, w, h);
gl.framebufferRenderbuffer(gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, depthRB);
gl.bindRenderbuffer(gl.RENDERBUFFER, null);
var texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, w, h, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
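The framebuffer object itself is created and bound before the attachments above, and checked for completeness afterwards. A minimal sketch of that part (the name fb is just illustrative):
var fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
// ... depth renderbuffer and color texture attachments as shown above ...
if (gl.checkFramebufferStatus(gl.FRAMEBUFFER) != gl.FRAMEBUFFER_COMPLETE)
    console.log("framebuffer is not complete");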
Vertex shader:
precision mediump float;
uniform mat4 u_transformMatrix;
attribute vec3 a_position;
varying float v_depth;
void main() {
    vec4 tcoords = u_transformMatrix * vec4(a_position, 1.0);
    v_depth = 0.5 * (tcoords.z + 1.0);
    gl_Position = tcoords;
}
Fragment shader:
precision mediump float;
varying float v_depth;
vec4 PackDepth(in float frag_depth) {
    vec4 bitSh = vec4(256.0 * 256.0 * 256.0, 256.0 * 256.0, 256.0, 1.0);
    vec4 bitMsk = vec4(0.0, 1.0 / 256.0, 1.0 / 256.0, 1.0 / 256.0);
    vec4 enc = fract(frag_depth * bitSh);
    enc -= enc.xxyz * bitMsk;
    return enc;
}

float UnpackDepth( const in vec4 enc ) {
    const vec4 bit_shift = vec4( 1.0 / ( 256.0 * 256.0 * 256.0 ), 1.0 / ( 256.0 * 256.0 ), 1.0 / 256.0, 1.0 );
    float decoded = dot( enc, bit_shift );
    return decoded;
}

void main() {
    vec4 encoded_depth;
    float decoded_depth;
    encoded_depth = PackDepth(v_depth);
    decoded_depth = UnpackDepth(encoded_depth);
    //gl_FragColor = vec4(vec3(decoded_depth), 1.0);
    gl_FragColor = encoded_depth;
}
This is what I get now: left: iPad Pro/Android/desktop Chrome with --emulate-shader-precision, middle: desktop FF/Chrome (no flags), right: encoded and decoded (obviously as 256-tone grayscale)
I tried many different methods for packing/unpacking but none seems to work. Any advice about what I am doing wrong?
Moreover, I also noticed that many examples from the most common WebGL libraries that use an RGBA texture to store depth information are broken - I believe for the same reason: an issue somewhere in the pack/unpack functions.
EDIT: same issue in Three.js: https://github.com/mrdoob/three.js/issues/9092
Interestingly, if I use the old mod() approach to packing depth, I get quite a bit more precision (at least a couple more bits).
What is the correct approach to store and retrieve depth information using mediump precision?
Answer 1:
The floating-point precision of a variable with the precision qualifier mediump is only guaranteed to be 10 bits.
See OpenGL ES Shading Language 1.00 Specification - 4.5.2 Precision Qualifiers, page 33
The required minimum ranges and precisions for precision qualifiers are (floating point):
highp: range (-2^62, 2^62), relative precision 2^-16
mediump: range (-2^14, 2^14), relative precision 2^-10
lowp: range (-2, 2), absolute precision 2^-8
For this reason, only the two highest bytes of the encoded depth carry any meaning. The algorithm stores the highest byte in the alpha channel and the second-highest byte in the blue channel, which is why an RGB view of the encoded depth may look arbitrary.
Furthermore, the algorithm overflows for a depth of exactly 1.0: a depth of 1.0 is encoded as completely black, but black decodes back to 0.0.
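For example, plugging frag_depth = 1.0 into the PackDepth function from the question gives fract(1.0 * 256.0*256.0*256.0) = 0.0, fract(1.0 * 256.0*256.0) = 0.0, fract(1.0 * 256.0) = 0.0 and fract(1.0 * 1.0) = 0.0, so the encoded color is vec4(0.0), and UnpackDepth(vec4(0.0)) returns 0.0.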
An algorithm that encodes a depth value in the range [0.0, 1.0] into 16 bits (two 8-bit channels, here the R and G color channels) may look like this:
vec2 PackDepth16( in float depth )
{
    float depthVal = depth * (256.0*256.0 - 1.0) / (256.0*256.0);
    vec3 encode = fract( depthVal * vec3(1.0, 256.0, 256.0*256.0) );
    return encode.xy - encode.yz / 256.0 + 1.0/512.0;
}

float UnpackDepth16( in vec2 pack )
{
    float depth = dot( pack, 1.0 / vec2(1.0, 256.0) );
    return depth * (256.0*256.0) / (256.0*256.0 - 1.0);
}
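A note on the constants: the scale factor (256.0*256.0 - 1.0) / (256.0*256.0) keeps a depth of exactly 1.0 below the point where fract() would wrap around to 0.0, and the + 1.0/512.0 offset is presumably there so that each packed value sits in the middle of its 8-bit quantization step rather than on its lower edge.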
This algorithm can be extended to 24 bits or 32 bits:
vec3 PackDepth24( in float depth )
{
    float depthVal = depth * (256.0*256.0*256.0 - 1.0) / (256.0*256.0*256.0);
    vec4 encode = fract( depthVal * vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
    return encode.xyz - encode.yzw / 256.0 + 1.0/512.0;
}

float UnpackDepth24( in vec3 pack )
{
    float depth = dot( pack, 1.0 / vec3(1.0, 256.0, 256.0*256.0) );
    return depth * (256.0*256.0*256.0) / (256.0*256.0*256.0 - 1.0);
}

vec4 PackDepth32( in float depth )
{
    depth *= (256.0*256.0*256.0 - 1.0) / (256.0*256.0*256.0);
    vec4 encode = fract( depth * vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
    return vec4( encode.xyz - encode.yzw / 256.0, encode.w ) + 1.0/512.0;
}

float UnpackDepth32( in vec4 pack )
{
    float depth = dot( pack, 1.0 / vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
    return depth * (256.0*256.0*256.0) / (256.0*256.0*256.0 - 1.0);
}
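If the packed depth is later read back to JavaScript for post-processing, the same arithmetic can be applied to the bytes returned by gl.readPixels. A minimal sketch for the 32-bit packing, assuming the framebuffer with the RGBA color attachment is still bound and w, h are its dimensions:

var pixels = new Uint8Array(w * h * 4);
gl.readPixels(0, 0, w, h, gl.RGBA, gl.UNSIGNED_BYTE, pixels);

// mirrors UnpackDepth32: each byte is 0..255, so divide by 255 to recover the color value
function unpackDepth32(r, g, b, a) {
    var depth = r / 255.0
              + g / 255.0 / 256.0
              + b / 255.0 / (256.0 * 256.0)
              + a / 255.0 / (256.0 * 256.0 * 256.0);
    return depth * (256.0 * 256.0 * 256.0) / (256.0 * 256.0 * 256.0 - 1.0);
}

var i = 0; // byte offset of the pixel of interest
var depth = unpackDepth32(pixels[i], pixels[i + 1], pixels[i + 2], pixels[i + 3]);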
See the code snippet below, which compares the algorithm from this answer (top half of the canvas) with the algorithm from the question (bottom half):
(function onLoad() {

// shader program object
var ShaderProgram = {};

ShaderProgram.Create = function( shaderList, uniformNames ) {
    var shaderObjs = [];
    for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
        var shaderObj = this.CompileShader( shaderList[i_sh].source, shaderList[i_sh].stage );
        if ( shaderObj == 0 )
            return 0;
        shaderObjs.push( shaderObj );
    }
    var progObj = this.LinkProgram( shaderObjs );
    if ( progObj != 0 ) {
        progObj.uniformLocation = {};
        for ( var i_n = 0; i_n < uniformNames.length; ++ i_n ) {
            var name = uniformNames[i_n];
            progObj.uniformLocation[name] = gl.getUniformLocation( progObj, name );
        }
    }
    return progObj;
}

ShaderProgram.Use = function( progObj ) { gl.useProgram( progObj ); }

ShaderProgram.CompileShader = function( source, shaderStage ) {
    var shaderScript = document.getElementById(source);
    if (shaderScript) {
        source = "";
        var node = shaderScript.firstChild;
        while (node) {
            if (node.nodeType == 3) source += node.textContent;
            node = node.nextSibling;
        }
    }
    var shaderObj = gl.createShader( shaderStage );
    gl.shaderSource( shaderObj, source );
    gl.compileShader( shaderObj );
    var status = gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS );
    if ( !status ) alert(gl.getShaderInfoLog(shaderObj));
    return status ? shaderObj : 0;
}

ShaderProgram.LinkProgram = function( shaderObjs ) {
    var prog = gl.createProgram();
    for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
        gl.attachShader( prog, shaderObjs[i_sh] );
    gl.linkProgram( prog );
    var status = gl.getProgramParameter( prog, gl.LINK_STATUS );
    if ( !status ) alert("Could not initialise shaders");
    gl.useProgram( null );
    return status ? prog : 0;
}
function drawScene(){
    var canvas = document.getElementById( "ogl-canvas" );
    var vp = [canvas.width, canvas.height];
    gl.viewport( 0, 0, canvas.width, canvas.height );
    gl.enable( gl.DEPTH_TEST );
    gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );

    ShaderProgram.Use( progDraw );
    gl.enableVertexAttribArray( progDraw.inPos );
    gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
    gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
    gl.drawArrays( gl.TRIANGLE_STRIP, 0, 4 );
    gl.disableVertexAttribArray( progDraw.inPos );
}
var gl;
var progDraw;
var bufObj = {};
var canvas;

function sceneStart() {
    canvas = document.getElementById( "ogl-canvas" );
    resize();
    gl = canvas.getContext( "experimental-webgl" );
    if ( !gl )
        return;

    progDraw = ShaderProgram.Create(
        [ { source : "draw-shader-vs", stage : gl.VERTEX_SHADER },
          { source : "draw-shader-fs", stage : gl.FRAGMENT_SHADER }
        ], [] );
    if ( progDraw == 0 )
        return;
    progDraw.inPos = gl.getAttribLocation( progDraw, "inPos" );

    bufObj.pos = gl.createBuffer();
    gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
    gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( [ -1, -1, 1, -1, -1, 1, 1, 1 ] ), gl.STATIC_DRAW );

    window.onresize = resize;
    setInterval(drawScene, 50);
}

function resize() {
    canvas.width = window.innerWidth;
    canvas.height = window.innerHeight;
}

sceneStart();
})();
<canvas id="ogl-canvas"></canvas>
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;

attribute vec2 inPos;
varying vec2 vertPos;

void main()
{
    vertPos = inPos;
    gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;

varying vec2 vertPos;

vec2 PackDepth16( in float depth )
{
    float depthVal = depth * (256.0*256.0 - 1.0) / (256.0*256.0);
    vec3 encode = fract( depthVal * vec3(1.0, 256.0, 256.0*256.0) );
    return encode.xy - encode.yz / 256.0 + 1.0/512.0;
}

float UnpackDepth16( in vec2 pack )
{
    float depth = dot( pack, 1.0 / vec2(1.0, 256.0) );
    return depth * (256.0*256.0) / (256.0*256.0 - 1.0);
}

vec4 PackDepth32_orig(in float frag_depth) {
    vec4 bitSh = vec4(256.0 * 256.0 * 256.0, 256.0 * 256.0, 256.0, 1.0);
    vec4 bitMsk = vec4(0.0, 1.0 / 256.0, 1.0 / 256.0, 1.0 / 256.0);
    vec4 enc = fract(frag_depth * bitSh);
    enc -= enc.xxyz * bitMsk;
    return enc;
}

float UnpackDepth32_orig( const in vec4 enc ) {
    const vec4 bit_shift = vec4( 1.0 / ( 256.0 * 256.0 * 256.0 ), 1.0 / ( 256.0 * 256.0 ), 1.0 / 256.0, 1.0 );
    float decoded = dot( enc, bit_shift );
    return decoded;
}

void main()
{
    float depthTest = clamp(vertPos.x + 0.5, 0.0, 1.0);
    vec2 color1 = clamp(PackDepth16( depthTest ), 0.0, 1.0);
    float depth1 = UnpackDepth16( color1 );
    vec4 color2 = clamp(PackDepth32_orig( depthTest ), 0.0, 1.0);
    float depth2 = UnpackDepth32_orig( color2 );
    gl_FragColor = vec4( mix( vec3(depth1), vec3(depth2), step(vertPos.y, 0.0) ), 1.0 );
}
</script>
Source: https://stackoverflow.com/questions/48288154/pack-depth-information-in-a-rgba-texture-using-mediump-precison