screen_space_reflection.glsl

/* clang-format off */
[vertex]

layout(location = 0) in highp vec4 vertex_attrib;
/* clang-format on */
layout(location = 4) in vec2 uv_in;

out vec2 uv_interp;
out vec2 pos_interp;

void main() {
	uv_interp = uv_in;
	gl_Position = vertex_attrib;
	pos_interp.xy = gl_Position.xy;
}

/* clang-format off */
[fragment]

in vec2 uv_interp;
/* clang-format on */
in vec2 pos_interp;

uniform sampler2D source_diffuse; //texunit:0
uniform sampler2D source_normal_roughness; //texunit:1
uniform sampler2D source_depth; //texunit:2

uniform float camera_z_near;
uniform float camera_z_far;

uniform vec2 viewport_size;
uniform vec2 pixel_size;

uniform float filter_mipmap_levels;

uniform mat4 inverse_projection;
uniform mat4 projection;

uniform int num_steps;
uniform float depth_tolerance;
uniform float distance_fade;
uniform float curve_fade_in;

layout(location = 0) out vec4 frag_color;

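// Projects a view-space position into [0, 1] screen UV space. The out
// parameter receives the clip-space w (which the xyz divide does not touch);
// main() inverts it later for perspective-correct interpolation along the ray.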
vec2 view_to_screen(vec3 view_pos, out float w) {
	vec4 projected = projection * vec4(view_pos, 1.0);
	projected.xyz /= projected.w;
	projected.xy = projected.xy * 0.5 + 0.5;
	w = projected.w;
	return projected.xy;
}

#define M_PI 3.14159265359
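
// Overview: reconstruct the view-space position of this pixel from the depth
// buffer, reflect the view direction around the G-buffer normal, then march
// the reflected ray in screen space (with perspective-correct depth) until it
// passes behind the depth buffer. On a hit, the screen color at that point is
// reused as the reflection; with REFLECT_ROUGHNESS defined, blurred mip
// levels of the screen are cone-traced to approximate rough reflections.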
void main() {
	vec4 diffuse = texture(source_diffuse, uv_interp);
	vec4 normal_roughness = texture(source_normal_roughness, uv_interp);

	vec3 normal;
	normal = normal_roughness.xyz * 2.0 - 1.0;

	float roughness = normal_roughness.w;

	float depth_tex = texture(source_depth, uv_interp).r;
	vec4 world_pos = inverse_projection * vec4(uv_interp * 2.0 - 1.0, depth_tex * 2.0 - 1.0, 1.0);
	vec3 vertex = world_pos.xyz / world_pos.w;

	vec3 view_dir = normalize(vertex);
	vec3 ray_dir = normalize(reflect(view_dir, normal));

	if (dot(ray_dir, normal) < 0.001) {
		frag_color = vec4(0.0);
		return;
	}

	//ray_dir = normalize(view_dir - normal * dot(normal,view_dir) * 2.0);
	//ray_dir = normalize(vec3(1,1,-1));

	////////////////

	//make ray length and clip it against the near plane (don't want to trace beyond visible)
	float ray_len = (vertex.z + ray_dir.z * camera_z_far) > -camera_z_near ? (-camera_z_near - vertex.z) / ray_dir.z : camera_z_far;
	vec3 ray_end = vertex + ray_dir * ray_len;
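	// If stepping camera_z_far along ray_dir would cross the near plane
	// (view-space z > -camera_z_near), the ternary above instead solves
	// vertex.z + t * ray_dir.z = -camera_z_near for t, so the ray stops at
	// the near plane; otherwise the full camera_z_far length is traced.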

	float w_begin;
	vec2 vp_line_begin = view_to_screen(vertex, w_begin);
	float w_end;
	vec2 vp_line_end = view_to_screen(ray_end, w_end);
	vec2 vp_line_dir = vp_line_end - vp_line_begin;

	//we need to interpolate w along the ray, to generate perspective correct reflections
	w_begin = 1.0 / w_begin;
	w_end = 1.0 / w_end;

	float z_begin = vertex.z * w_begin;
	float z_end = ray_end.z * w_end;
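	// View-space z is not linear across a screen-space line, but 1/w and z/w
	// are. So w_begin/w_end now hold 1/w and z_begin/z_end hold z_view / w;
	// both are stepped linearly along the line, and view-space depth is
	// recovered at each step as (z_view / w) / (1 / w) = z_view (z_to = z / w).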

	vec2 line_begin = vp_line_begin / pixel_size;
	vec2 line_dir = vp_line_dir / pixel_size;
	float z_dir = z_end - z_begin;
	float w_dir = w_end - w_begin;

	// clip the line to the viewport edges

	float scale_max_x = min(1.0, 0.99 * (1.0 - vp_line_begin.x) / max(1e-5, vp_line_dir.x));
	float scale_max_y = min(1.0, 0.99 * (1.0 - vp_line_begin.y) / max(1e-5, vp_line_dir.y));
	float scale_min_x = min(1.0, 0.99 * vp_line_begin.x / max(1e-5, -vp_line_dir.x));
	float scale_min_y = min(1.0, 0.99 * vp_line_begin.y / max(1e-5, -vp_line_dir.y));
	float line_clip = min(scale_max_x, scale_max_y) * min(scale_min_x, scale_min_y);
	line_dir *= line_clip;
	z_dir *= line_clip;
	w_dir *= line_clip;

	//scale z and w advance to the line advance
	vec2 line_advance = normalize(line_dir); //down to pixel
	float step_size = length(line_advance) / length(line_dir);
	float z_advance = z_dir * step_size; // adapt z advance to line advance
	float w_advance = w_dir * step_size; // adapt w advance to line advance

	//make line advance faster if direction is closer to pixel edges (this avoids sampling the same pixel twice)
	float advance_angle_adj = 1.0 / max(abs(line_advance.x), abs(line_advance.y));
	line_advance *= advance_angle_adj; // advance one full pixel per step along the dominant axis
	z_advance *= advance_angle_adj;
	w_advance *= advance_angle_adj;
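	// This is DDA-style stepping: line_advance starts as a unit vector, and
	// advance_angle_adj rescales it so the dominant axis moves exactly one
	// pixel per iteration. E.g. a unit direction of roughly (0.894, 0.447)
	// becomes (1.0, 0.5); z_advance and w_advance are scaled to match.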

	vec2 pos = line_begin;
	float z = z_begin;
	float w = w_begin;
	float z_from = z / w;
	float z_to = z_from;
	float depth;
	vec2 prev_pos = pos;

	bool found = false;

	float steps_taken = 0.0;

	for (int i = 0; i < num_steps; i++) {
		pos += line_advance;
		z += z_advance;
		w += w_advance;

		//convert to linear depth
		depth = texture(source_depth, pos * pixel_size).r * 2.0 - 1.0;
#ifdef USE_ORTHOGONAL_PROJECTION
		depth = ((depth + (camera_z_far + camera_z_near) / (camera_z_far - camera_z_near)) * (camera_z_far - camera_z_near)) / 2.0;
#else
		depth = 2.0 * camera_z_near * camera_z_far / (camera_z_far + camera_z_near - depth * (camera_z_far - camera_z_near));
#endif
		depth = -depth;
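		// The stored depth is remapped from [0, 1] to NDC [-1, 1] above and
		// then linearized (the perspective branch inverts the standard
		// projection: 2 * n * f / (f + n - ndc_z * (f - n))). It is negated
		// so it sits on the -Z axis like the ray depth it is compared with.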

		z_from = z_to;
		z_to = z / w;

		if (depth > z_to) {
			//if depth was surpassed
			if (depth <= max(z_to, z_from) + depth_tolerance) {
				//check the depth tolerance
				found = true;
			}
			break;
		}

		steps_taken += 1.0;
		prev_pos = pos;
	}
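
	// A hit is registered when the sampled scene surface rises above the ray
	// depth (depth > z_to; both are negative view-space z) and lies within
	// depth_tolerance of the depth range the ray covered this step. If the
	// surface is much closer than the ray, it is treated as an occluder the
	// ray passed behind, and the march stops without a hit.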

	if (found) {
		float margin_blend = 1.0;

		vec2 margin = vec2((viewport_size.x + viewport_size.y) * 0.5 * 0.05); //make a uniform margin
		if (any(bvec4(lessThan(pos, -margin), greaterThan(pos, viewport_size + margin)))) {
			//clip outside screen + margin
			frag_color = vec4(0.0);
			return;
		}

		{
			//blend fading out towards external margin
			vec2 margin_grad = mix(pos - viewport_size, -pos, lessThan(pos, vec2(0.0)));
			margin_blend = 1.0 - smoothstep(0.0, margin.x, max(margin_grad.x, margin_grad.y));
			//margin_blend=1.0;
		}

		vec2 final_pos;
		float grad;
		grad = steps_taken / float(num_steps);
		float initial_fade = curve_fade_in == 0.0 ? 1.0 : pow(clamp(grad, 0.0, 1.0), curve_fade_in);
		float fade = pow(clamp(1.0 - grad, 0.0, 1.0), distance_fade) * initial_fade;
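		// grad is the fraction of the step budget used before the hit. When
		// curve_fade_in > 0, initial_fade suppresses hits found very close to
		// the ray origin, while the distance_fade term fades out hits that
		// needed nearly all of the available steps.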
		final_pos = pos;

#ifdef REFLECT_ROUGHNESS

		vec4 final_color;
		//if roughness is enabled, do screen space cone tracing
		if (roughness > 0.001) {
			///////////////////////////////////////////////////////////////////////////////////////
			//use a blurred version (in consecutive mipmaps) of the screen to simulate roughness

			float gloss = 1.0 - roughness;
			float cone_angle = roughness * M_PI * 0.5;
			vec2 cone_dir = final_pos - line_begin;
			float cone_len = length(cone_dir);
			cone_dir = normalize(cone_dir); //will be used normalized from now on
			float max_mipmap = filter_mipmap_levels - 1.0;
			float gloss_mult = gloss;

			float rem_alpha = 1.0;
			final_color = vec4(0.0);

			for (int i = 0; i < 7; i++) {
				float op_len = 2.0 * tan(cone_angle) * cone_len; //opposite side of iso triangle
				float radius;
				{
					//fit to sphere inside cone (sphere ends at end of cone), something like this:
					// ___
					// \O/
					//  V
					//
					// as it avoids bleeding from beyond the reflection as much as possible. As a plus
					// it also makes the rough reflection more elongated.
					float a = op_len;
					float h = cone_len;
					float a2 = a * a;
					float fh2 = 4.0f * h * h;
					radius = (a * (sqrt(a2 + fh2) - a)) / (4.0f * h);
				}
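
				// The block above computes the incircle radius of the cone's
				// isosceles cross-section (base a = op_len, height h = cone_len):
				// r = 2 * area / perimeter = a * h / (a + sqrt(a^2 + 4h^2)),
				// which rationalizes to a * (sqrt(a^2 + 4h^2) - a) / (4h),
				// the expression used for radius.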

				//find the place where screen must be sampled
				vec2 sample_pos = (line_begin + cone_dir * (cone_len - radius)) * pixel_size;
				//radius is in pixels, so it's natural that log2(radius) maps to the right mipmap for the amount of pixels
				float mipmap = clamp(log2(radius), 0.0, max_mipmap);
				//mipmap = max(mipmap-1.0,0.0);

				//do sampling
				vec4 sample_color;
				{
					sample_color = textureLod(source_diffuse, sample_pos, mipmap);
				}

				//multiply by gloss
				sample_color.rgb *= gloss_mult;
				sample_color.a = gloss_mult;

				rem_alpha -= sample_color.a;
				if (rem_alpha < 0.0) {
					sample_color.rgb *= (1.0 - abs(rem_alpha));
				}

				final_color += sample_color;

				if (final_color.a >= 0.95) {
					// This code of accumulating gloss and aborting on near one
					// makes sense when you think of cone tracing.
					// Think of it as if roughness was 0, then we could abort on the first
					// iteration. For lesser roughness values, we need more iterations, but
					// each needs to have less influence given the sphere is smaller
					break;
				}

				cone_len -= radius * 2.0; //go to next (smaller) circle.
				gloss_mult *= gloss;
			}
		} else {
			final_color = textureLod(source_diffuse, final_pos * pixel_size, 0.0);
		}

		frag_color = vec4(final_color.rgb, fade * margin_blend);

#else
		frag_color = vec4(textureLod(source_diffuse, final_pos * pixel_size, 0.0).rgb, fade * margin_blend);
#endif

	} else {
		frag_color = vec4(0.0, 0.0, 0.0, 0.0);
	}
}