Unity: getting shadow information in post-processing to compute volumetric light?

Record 1: extrusion along the light direction

References:
1.1: Unity3D简单体积光的实现 (Marco&GalaxyDragon, CSDN)
1.2: Unity Shader-GodRay,体积光(BillBoard,Volume Shadow,Raidal Blur,Ray-Marching)
1.3: YakShaver: Unity Shader学习:体积光(Volumetric Light)

In short: stretch the mesh, then adjust its transparency and color to fake a volume of light. Extruding the front-facing vertices works just as well; the point is simply to elongate the mesh. Because alpha blending is needed, the Queue is set to Transparent.

Tags
{
"Queue" = "Transparent" "IgnoreProjector"="True" "RenderType"="Transparent"
}
pass
{
Tags {"LightMode"="ForwardBase"}
ZWrite Off
Blend SrcAlpha OneMinusSrcAlpha
//Cull Off: on or off makes little difference to the final result
v2f vert(appdata_base v) : POSITION
{
v2f o;
float3 toLight = ObjSpaceLightDir(v.vertex);
//dot the light direction with the normal to tell lit faces from back faces
float extrude = dot(toLight, v.normal) < 0.0 ? 1.0 : 0.0;
//v.vertex.xyz += v.normal * 0.02; //push out slightly along the normal, for fine-tuning
//extrude the back-facing side away from the light; extrudeDistance controls the extrusion length
v.vertex.xyz -= toLight * (extrude * extrudeDistance);
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord.xy;
//pass the object-space position through to the fragment shader
o.objPos = v.vertex;
return o;
}
float4 frag(v2f i):COLOR
{
fixed4 tex = tex2D(_MainTex, i.uv);
//distance from the point to the object-space origin, used to attenuate the light intensity
float att = 1 / (1 + length(i.objPos));
//Intensity and Pow are tuning parameters
float4 c = pow(min(1,tex * BaseColor * att*Intensity), Pow);
return c;
}

Result: https://www.zhihu.com/video/1499239516825645056

One problem: Unity's built-in cube apparently isn't a welded whole; after extrusion each face moves off independently, with nothing interpolated in between.

Drawback: every object has to carry this shader.

Record 2: radial blur

References:
2.1: Chapter 13. Volumetric Light Scattering as a Post-Process (GPU Gems 3, NVIDIA)
2.2: Unity Shader-GodRay,体积光(BillBoard,Volume Shadow,Raidal Blur,Ray-Marching)
2.3: FOXhunt: 游戏开发相关实时渲染技术之体积光

In short: in screen space, march every pixel toward the light source, sampling the color at each step and adding it onto the starting pixel, which produces a radial blur. In figure 1, the light position is transformed from world space to clip space, and its screen-space coordinate is obtained in the fragment shader.

Steps:
1. Render the full scene.
2. Extract the bright parts of the image.
3. Radially blur the bright parts.
4. Composite the blurred bright layer back onto the original image.

NVIDIA's pseudocode (figure borrowed from reference 2.1):

float4 main(float2 texCoord : TEXCOORD0) : COLOR0 {
// Calculate vector from pixel to light source in screen space.
half2 deltaTexCoord = (texCoord - ScreenLightPos.xy);
// Divide by number of samples and scale by control factor.
deltaTexCoord *= 1.0f / NUM_SAMPLES * Density;
// Store initial sample.
half3 color = tex2D(frameSampler, texCoord);
// Set up illumination decay factor.
half illuminationDecay = 1.0f;
// Evaluate summation from Equation 3 NUM_SAMPLES iterations.
for (int i = 0; i < NUM_SAMPLES; i++)
{
// Step sample location along ray.
texCoord -= deltaTexCoord;
// Retrieve sample at new location.
half3 sample = tex2D(frameSampler, texCoord);
// Apply sample attenuation scale/decay factors.
sample *= illuminationDecay * Weight;
// Accumulate combined color.
color += sample;
// Update exponential decay factor.
illuminationDecay *= Decay;
}
// Output final color with a further scale control factor.
return float4( color * Exposure, 1);
}

My own code:

Shader "Lighting/VolumeLight2"
{
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {}
_VolumeLight ("_VolumeLight (RGB)", 2D) = "black" {}
_LuminanceThreshold ("Luminance Threshold", Float) = 0.5
_BlurSize ("Blur Size", Float) = 1.0
_LightColor("Light Color", color) = (1.0,0.9,0.7)
}
SubShader
{
CGINCLUDE
#include "UnityCG.cginc"
sampler2D _MainTex;
half4 _MainTex_TexelSize;
sampler2D _VolumeLight;
float _LuminanceThreshold;
float _BlurSize;
float4 _ViewPortLightPos;
sampler2D _CameraDepthTexture;
struct v2f {
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
};
v2f vertExtractBright(appdata_img v) {
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord;
return o;
}
fixed luminance(fixed4 color) {
return 0.2125 * color.r + 0.7154 * color.g + 0.0721 * color.b;
}
fixed4 fragExtractBright(v2f i) : SV_Target {
fixed4 c = tex2D(_MainTex, i.uv);
float linearDepth = Linear01Depth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv));
c *= linearDepth > 0.99f? 1.0f:0.0f;
fixed val = clamp(luminance(c) - _LuminanceThreshold, 0.0, 1.0);
//the luminance above the threshold is used as the (grayscale) highlight value
return val;
}
struct v2fOffsetTexture{
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
float2 screenlightDir:TEXCOORD1;
};
float _uvOffsetScale;
//intensity applied to the blurred (offset) light
float _Exposure;
v2fOffsetTexture vertOffsetTexture(appdata_img v){
v2fOffsetTexture o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord;
//vector from this pixel to the light in screen space
o.screenlightDir = _uvOffsetScale * (o.uv - _ViewPortLightPos.xy);
return o;
}
fixed4 fragOffsetTexture(v2fOffsetTexture i): SV_Target{
float NUM_SAMPLES = 12;
//uv offset per marching step
float2 deltaTexCoord = i.screenlightDir * (1.0f / NUM_SAMPLES);
//decay factors
half illuminationDecay = 1.0f;
half Decay = 0.9f;
half Weight = 1.0f;
float2 texCoord = i.uv;
half3 color = tex2D(_MainTex, texCoord);
for(int j = 0; j < NUM_SAMPLES; j++){
// step the sample location along the ray
texCoord -= deltaTexCoord;
// fetch the sample at the new location
half3 sample = tex2D(_MainTex, texCoord);
// apply the per-sample attenuation/decay factors
sample *= illuminationDecay * Weight;
// accumulate the combined color
color += sample;
// update the exponential decay factor
illuminationDecay *= Decay;
}
// output the final color with a further scale control factor
return float4( color/NUM_SAMPLES * _Exposure, 1);
}
float3 lightColor = float3(1.0f, 0.9f, 0.7f);
struct v2fVolumeLight {
float4 pos : SV_POSITION;
half4 uv : TEXCOORD0;
};
v2fVolumeLight vertVolumeLight(appdata_img v) {
v2fVolumeLight o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv.xy = v.texcoord;
o.uv.zw = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0.0)
o.uv.w = 1.0 - o.uv.w;
#endif
return o;
}
fixed4 fragVolumeLight(v2fVolumeLight i) : SV_Target {
return tex2D(_MainTex, i.uv.xy) + tex2D(_VolumeLight, i.uv.zw);
}
//fixed4 fragVolumeLight(v2fVolumeLight i) : SV_Target {
// return tex2D(_VolumeLight, i.uv.zw);
//}
ENDCG
ZTest Always Cull Off ZWrite Off
Pass {
CGPROGRAM
#pragma vertex vertExtractBright
#pragma fragment fragExtractBright
ENDCG
}
Pass {
CGPROGRAM
#pragma vertex vertOffsetTexture
#pragma fragment fragOffsetTexture
ENDCG
}
Pass {
CGPROGRAM
#pragma vertex vertVolumeLight
#pragma fragment fragVolumeLight
ENDCG
}
}
FallBack Off
}

Code analysis: the structure mirrors the Bloom chapter of "Unity Shader入门精要". The shader has three passes:
pass 1: extract the bright (grayscale) parts of the rendered image
pass 2: radially blur the brightness map as shown in figure 1
pass 3: composite the brightness map back onto the original image

In fragExtractBright, the line c *= linearDepth > 0.99f? 1.0f:0.0f; culls nearby objects so that only the background/skybox color is kept. If an object is itself bright, its highlights would get radially blurred along with the light, so it is masked to black here; section 13.5 of the NVIDIA chapter (reference 2.1) discusses the same problem. In fixed val = clamp(luminance(c) - _LuminanceThreshold, 0.0, 1.0);, _LuminanceThreshold is the brightness threshold, used to tune what counts as bright. fragOffsetTexture is the core pass: the marching accumulation of brightness.

A good result needs a lot of detail work, and the technique has many limitations. A directional light has no real position, so its on-screen position must be derived from the sun's direction (see the driver sketch below). The effect also needs downsampling, jittered sampling, and filter-based denoising so the light doesn't flicker.

https://www.zhihu.com/video/1499905558950334464
https://www.zhihu.com/video/1499905430989664256
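A minimal C# driver for the three passes above (my own sketch, not code from the original post). It assumes mat is a Material built from Lighting/VolumeLight2 and sun is the scene's directional light, and it approximates _ViewPortLightPos by projecting a point far away opposite the sun direction:

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class VolumeLight2Driver : MonoBehaviour
{
    public Material mat;  //material using Lighting/VolumeLight2 (assumed)
    public Light sun;     //the directional light (assumed)

    void Start()
    {
        //needed so _CameraDepthTexture is available in fragExtractBright
        GetComponent<Camera>().depthTextureMode |= DepthTextureMode.Depth;
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        Camera cam = GetComponent<Camera>();
        //project a distant point along the sun direction to get its viewport position;
        //viewport.z < 0 means the sun is behind the camera (the effect should fade out then)
        Vector3 viewport = cam.WorldToViewportPoint(cam.transform.position - sun.transform.forward * 10000f);
        mat.SetVector("_ViewPortLightPos", new Vector4(viewport.x, viewport.y, 0, 0));

        RenderTexture bright = RenderTexture.GetTemporary(source.width, source.height, 0);
        RenderTexture blurred = RenderTexture.GetTemporary(source.width, source.height, 0);
        Graphics.Blit(source, bright, mat, 0);      //pass 0: extract the bright parts
        Graphics.Blit(bright, blurred, mat, 1);     //pass 1: radial blur toward the light
        mat.SetTexture("_VolumeLight", blurred);
        Graphics.Blit(source, destination, mat, 2); //pass 2: composite with the original image
        RenderTexture.ReleaseTemporary(bright);
        RenderTexture.ReleaseTemporary(blurred);
    }
}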
Record 3: ray marching

References:
3.1: Fast, Flexible, Physically-Based Volumetric Light Scattering - NVIDIA Developer
3.2: SardineFish: 在 Unity 中实现体积光渲染
3.3: FOXhunt: 游戏开发相关实时渲染技术之体积光
3.4: Unity Shader-GodRay,体积光(BillBoard,Volume Shadow,Raidal Blur,Ray-Marching)
3.5: 希望成为魔法师: UE移动端中的SingleLayerWater
3.6: MaxwellGeng: Unity3D实时体积光
3.7: MaxwellGeng: Unity3D实时体积光(二)
3.8: 06.游戏中地形大气和云的渲染(下), GAMES104-现代游戏引擎:从入门到实践 (bilibili)

Principle: as the two figures above show, when the eye looks toward a target and a stretch of the path receives no light, a light shaft forms. We cast a ray from the eye, advance it a little at a time, take the light intensity at each point, and keep marching until the ray reaches a surface; the accumulated intensity is that point's volumetric light strength. Put another way: from every pixel on the near plane a ray shoots toward the far plane, collecting light intensity as it marches, and the sum is that pixel's volumetric light intensity.

fixed4 frag(v2f i):SV_Target {
//linear depth of the current point on the full-screen quad
float linearDepth = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth));
//reconstruct the world position from depth
float3 worldPos = _WorldSpaceCameraPos + linearDepth * i.interpolatedRay.xyz;
//world position on the near plane (optional ray start)
float3 normalizeRay = normalize(i.interpolatedRay.xyz);
//float3 startPos = _WorldSpaceCameraPos + normalizeRay*_NearVectorLength;
float3 startPos = _WorldSpaceCameraPos;
//the ray from the start position out toward the far plane
float maxSamplingLength = _FarVectorLength * 0.33; //max sampling distance; only the first 1/3 of the ray is used
float3 ray = worldPos - startPos;
float rayLength = length(ray);
float target_length = min(maxSamplingLength, rayLength); //clamped target length of the march
//world position of the ray's end point
float3 final = startPos + normalize(ray) * target_length;
float step = 1.0f/_sampleNum;
float atten = 1.0f;
float3 currentPostion = float3(1.0f,0.0f,0.0f);
for(float i = step;i < 1.0f;i+=step){
//advance the sample position
currentPostion = lerp(startPos, final, i);
//sample the cascaded shadow map for this point's shadow value
atten += GetLightAttenuation(currentPostion);
}
//leave atten unprocessed for now, just to get a first result
float4 resultColor = _LightFinalColor*atten*_scatterDensity;
return resultColor;
}

A quick summary of the steps above:
1. Reconstruct the pixel's world position from the depth texture.
2. Limit the sampling range to maxSamplingLength; the camera's world position is the ray start, the reconstructed world position the ray end.
3. March a fixed number of samples (the same count regardless of distance; the intensity imbalance is handled later), lerping between start and end and accumulating the light intensity.
4. Light color * accumulated intensity * a tuning factor gives the final result.

https://www.zhihu.com/video/1506700092380278784

Scattering types in the atmosphere:
1. Rayleigh scattering: particles with diameter far smaller than the wavelength of the radiation (e.g. air molecules).
2. Mie scattering: particles with diameter comparable to or larger than the incident wavelength (e.g. aerosols).

Volumetric light generally uses Mie scattering: dust or water vapor in the air is what makes light shafts visible. In the Mie scattering equation shown in GAMES104 lecture 6, the left-hand Scattering Coefficient is an expression in the air density N at one standard atmosphere and the altitude h; for a viewer on the ground it can be treated as a constant, since altitude and air density barely change. The right-hand Phase Function has a parameter g in [-1, 1] that shifts the scattering direction: at g = 0 the expression degenerates to the Rayleigh phase function, and for g > 0 the scattering concentrates toward the light direction, more strongly as g approaches 1. The formula used in practice is the Mie scattering (Henyey-Greenstein) phase function; UE4 uses its fast approximation, the Schlick phase function. At g = 0.5 the two have almost identical shapes but opposite lobe directions, which needs care when using them.
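For reference, the standard forms (my own transcription, not the original figures; the plus sign on the cos θ term in the Schlick form below matches the MieScattering2 macro, which is why its lobe appears flipped relative to Henyey-Greenstein):

p_{HG}(\theta) = \frac{1-g^{2}}{4\pi\,(1+g^{2}-2g\cos\theta)^{3/2}}

p_{Schlick}(\theta) = \frac{1-k^{2}}{4\pi\,(1+k\cos\theta)^{2}}, \quad k \approx 1.55g - 0.55g^{3}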
#define MieScattering(cosAngle, g) g.w * (g.x / (pow(g.y - g.z * cosAngle, 1.5)))
#define MieScattering2(cosAngle, k) (1-k*k)/(4*PI*(1+k*cosAngle)*(1+k*cosAngle))
float4 RayMarch(float2 screenPos, float3 rayStart, float3 final, float3 ray, float rate,float rayLength){
float4 vlight = float4(0, 0, 0, 1);
rate *= _scatterDensity;
float3 step = 1.0f/_sampleNum;
float cosAngle = dot(_LightDir.xyz, ray);
float transmittance = 1.0f;
float stepSize = rayLength/_sampleNum;
for(float i = step.x;i < 1.0f;i+=step.x){
//advance the sample position
float3 currentPostion = lerp(rayStart, final, i);
float atten = GetLightAttenuation(currentPostion);
//weight this point's light contribution with the Mie phase function
atten *= MieScattering2(cosAngle, _MieK);
//rate is the value of rayLength / _MaxRayLength
//ScatterStep uses this ratio to balance each step's atten so the intensity stays uniform, and accumulates it into vlight
vlight = ScatterStep(vlight.rgb, vlight.a, atten, rate);
}
// apply light's color
vlight.rgb *= _LightFinalColor;
vlight.a = saturate(vlight.a);
return vlight;
}

This is the RayMarch part extracted from the earlier code, with the Mie phase function and uniform intensity sampling added.

float4 ScatterStep(float3 accumulatedLight, float accumulatedTransmittance, float sliceLight, float sliceDensity)
{
sliceDensity = max(sliceDensity, 0.000001);
float sliceTransmittance = exp(-sliceDensity / _sampleNum);
// Seb Hillaire's improved transmission by calculating an integral over slice depth instead of
// constant per slice value. Light still constant per slice, but that's acceptable. See slide 28 of
// Physically-based & Unified Volumetric Rendering in Frostbite
// http://www.frostbite.com/2015/08/physically-based-unified-volumetric-rendering-in-frostbite/
float3 sliceLightIntegral = sliceLight * (1.0 - sliceTransmittance) / sliceDensity;
accumulatedLight += sliceLightIntegral * accumulatedTransmittance;
accumulatedTransmittance *= sliceTransmittance;
return float4(accumulatedLight, accumulatedTransmittance);
}

Honestly, I never fully understood ScatterStep, even with the author's explanatory link in the comments ┑( ̄Д ̄)┍ (The gist, per slide 28 of the Frostbite talk: the transmittance through a slice of optical density d is T = exp(-d), and analytically integrating the in-scattered light (assumed constant within the slice) gives sliceLight * (1 - T) / d, which is then weighted by the transmittance accumulated so far.)

Adjusting g produces the change below: https://www.zhihu.com/video/1506777372900204545

Optimizations

1. Lower the resolution:

RenderTexture volumeLightTexture = new RenderTexture(width / downSample, height / downSample, 0, RenderTextureFormat.ARGBHalf);

Shrinking the RenderTexture at creation time means far fewer pixels to process; for bloom-like effects the downsampling even doubles as a blur.

2. Lower the sample count: drop sampleNum to 10.

3. Randomly offset the uv:

#define highQualityRandom(seed) cos(sin(seed * float2(641.5467987313875, 3154.135764) + float2(1.943856175, 631.543147)) * float2(4635.4668457, 84796.1653) + float2(6485.15686, 1456.3574563))
float2 uv = i.uv;
float2 randomOffset = highQualityRandom((_ScreenParams.y * uv.y + uv.x) * _ScreenParams.x + _RandomNumber) * _JitterOffset;
uv += randomOffset;

The random offset causes flicker: https://www.zhihu.com/video/1506780732495810560

4. Offset each sample position:

#define random(seed) sin(seed * float2(641.5467987313875, 3154.135764) + float2(1.943856175, 631.543147))
float3 step = 1.0f/_sampleNum;
step.yz *= float2(0.25, 0.2);
float2 seed = random((_ScreenParams.y * screenPos.y + screenPos.x) * _ScreenParams.x + _RandomNumber);
for(float i = step.x;i < 1.0f;i+=step.x){
seed = random(seed);
float lerpValue = i + seed.y * step.y + seed.x * step.z;
//advance the sample position
float3 currentPostion = lerp(rayStart, final, lerpValue);
...
}

seed carries two random values. random is a trig (sine) function, so each component lies in [-1, 1]. With _sampleNum = 10, step.y = 0.1 * 0.25 = 0.025 and step.z = 0.1 * 0.2 = 0.02, so seed.y * step.y + seed.x * step.z ranges over [-0.045, 0.045]; adding it to each step's sample position randomizes the sampling.

https://www.zhihu.com/video/1506786422590152704

The volumetric light now shows, but flickers even harder.

5. Denoise with a joint bilateral filter, although no color comparison is actually used here, so strictly speaking it should just be called a joint filter?

The Gaussian filter formula is

G(x,y)=\frac{1}{2\pi\sigma^{2}}e^{-\frac{x^{2}+y^{2}}{2\sigma^{2}}}

where σ is the standard deviation (typically 1). When a Gaussian filter weighs not just distance but also color difference, it is called bilateral filtering, which helps preserve high-frequency detail. When depth difference, normal difference and similar factors are considered on top of that, it is called joint bilateral filtering. Since our goal is denoising, in principle any Gaussian-like falloff curve will do: the normalization factor in front of the Gaussian doesn't matter, and changing the 2σ² inside the kernel to 1σ² is also fine.

Below is the code from 麦老师's project, unmodified:
// GaussianWeight
//-----------------------------------------------------------------------------------------
#define PI 3.14159265359f
#define GaussianWeight(offset, deviation2) (deviation2.y * exp(-(offset * offset) / (deviation2.x)))
//-----------------------------------------------------------------------------------------
// BilateralBlur
//-----------------------------------------------------------------------------------------
float4 BilateralBlur(float2 uv, const float2 direction, Texture2D depth, SamplerState depthSampler, const int kernelRadius, const float kernelWeight)
{
//const float deviation = kernelRadius / 2.5;
const float dev = kernelWeight / GAUSS_BLUR_DEVIATION; // make it really strong
const float dev2 = dev * dev * 2;
const float2 deviation = float2(dev2, 1.0f / (dev2 * PI));
float4 centerColor = _MainTex.Sample(sampler_MainTex, uv);
float3 color = centerColor.xyz;
//return float4(color, 1);
float centerDepth = (LinearEyeDepth(depth.Sample(depthSampler, uv)));
float weightSum = 0;
// gaussian weight is computed from constants only -> will be computed in compile time
float weight = GaussianWeight(0, deviation);
color *= weight;
weightSum += weight;
[unroll] for (int i = -kernelRadius; i < 0; i += 1)
{
float2 offset = (direction * i);
float3 sampleColor = _MainTex.Sample(sampler_MainTex, uv, offset);
float sampleDepth = (LinearEyeDepth(depth.Sample(depthSampler, uv, offset)));
float depthDiff = abs(centerDepth - sampleDepth);
float dFactor = depthDiff * BLUR_DEPTH_FACTOR; //Should be 0.5
float w = exp(-(dFactor * dFactor));
// gaussian weight is computed from constants only -> will be computed in compile time
weight = GaussianWeight(i, deviation) * w;
color += weight * sampleColor;
weightSum += weight;
}
[unroll] for (i = 1; i <= kernelRadius; i += 1)
{
float2 offset = (direction * i);
float3 sampleColor = _MainTex.Sample(sampler_MainTex, uv, offset);
float sampleDepth = (LinearEyeDepth(depth.Sample(depthSampler, uv, offset)));
float depthDiff = abs(centerDepth - sampleDepth);
float dFactor = depthDiff * BLUR_DEPTH_FACTOR; //Should be 0.5
float w = exp(-(dFactor * dFactor));
// gaussian weight is computed from constants only -> will be computed in compile time
weight = GaussianWeight(i, deviation) * w;
color += weight * sampleColor;
weightSum += weight;
}
color /= weightSum;
return float4(color, centerColor.w);
}
For the depth term, the formula is changed quite a bit: the kernel becomes e^(-factor²), and the denominator is dropped. Because a 2D Gaussian factors into the product of two 1D Gaussians, G(x,y) = g(x) · g(y), the blur can run as two separable passes. Strictly speaking, once bilateral and richer terms are added the kernel is in theory no longer separable, but splitting it anyway causes little visible harm. The direction parameter selects the pass direction: one horizontal pass plus one vertical pass turns n*n samples per pixel into n+n.

Result: https://www.zhihu.com/video/1506789060979408896

With sampleNum raised to 50 (the larger sampleNum, the less noticeable the flicker): https://www.zhihu.com/video/1506789601780363264

Blended with the original image: https://www.zhihu.com/video/1506791606628171776

Unity's Post-Processing stack has TAA, which can also be used for denoising. Most of the code above comes from 麦老师's project (link). To be continued...
Players of Killzone: Shadow Fall may remember its volumetric light and shadow vividly. The developers clearly wanted to emphasize volumetric light and lateral highlights when designing the visuals, which gives the game a distinctive look; they used so much of it that it almost felt like "light pollution" to me. This article implements a simple version of that hazy effect. When I first got into rendering I was amazed by the implementations of the experts referenced here, and many of their techniques are used below.

Introduction: when traveling light strikes dust or water vapor it scatters, producing a hazy look. This article implements a simple volumetric light effect for a directional light under forward rendering, done as a post-process.

Main idea: ray march a point along the view direction, testing at each step whether the point is in shadow. After enough steps we know what light and shadow the view ray passes through on the way to its end position, and we can then brighten or darken the pixel accordingly.

Implementation. First a preview of the effect, compared as: original scene / volumetric light / volumetric shadow / volumetric light + volumetric shadow.

1. Sampling the shadow information

First we need a way to tell whether a point is in shadow. For a directional light, if you understand Unity's built-in shadowmap implementation, you know the shadow information can be obtained by comparing the light-space depth stored in the shadowmap texture against the point's own light-space depth; see the related references. (One common way to bind the directional light's shadowmap for sampling is sketched right after the C# listing below.) Note that Shadow Cascades is set to "No Cascades" here; if multiple cascades are enabled, the shadow sampling needs adjusting, which you can do yourself. The image below outputs each pixel's shadow value this way and looks identical to the default shadows; from here we just fetch the shadow information like this at every marched point.

2. Ray marching

Summing the information from every point (light stacking up layer by layer, shadow subtracting layer by layer; see the references) yields the light/shadow information the pixel's ray passes through. The marching strategy here simply uses a uniform step length; other strategies can give better results.

3. Optimizing performance

We can already get the light/shadow information, but a good-looking result takes a lot of steps. The image above used 256 steps, and each step means one more sample for that pixel, which is extremely expensive. For exactly this situation, Killzone's developer Guerrilla Games shared a downsample + dither + blur combo to optimize performance. A dither texture perturbs the sample points; with dithering and just 16 steps, and then downsampling and blur on top, the cost drops and the image even gains a pleasant haze.

4. Compositing the final image

https://www.zhihu.com/video/1203642798156890112

Source code. C# part:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class VolumetricShadow : MonoBehaviour {
private Matrix4x4 frustumCorners = Matrix4x4.identity;
private Transform camTransform;
private Camera cam;
private RenderTexture marchingRT;
private RenderTexture tempRT;
public Material mat;
[Range(0,5)]
public int downSample=2;
[Range(0f, 5f)]
public float samplerScale = 1f;
[Range(0,256)]
public int rayMarchingStep=16;
[Range(0f,100f)]
public float maxRayLength=15f;
[Range(0f, 2f)]
public float volumetricLightIntenstiy = 0.05f;
[Range(0f, 2f)]
public float lightScatteringFactor = 0.5f;
[Range(0f, 5f)]
public float volumetricShadowIntenstiy = 0f;
[Range(0f, 0.1f)]
public float shadowAttenuation = 0.08f;
[Range(0f, 1f)]
public float minShadow = 0.5f;
void Start () {
camTransform = transform;
cam = GetComponent<Camera>();
cam.depthTextureMode = DepthTextureMode.Depth;
mat.SetTexture("_DitherMap", GenerateDitherMap());
}
private void OnRenderImage(RenderTexture source, RenderTexture destination)
{
//field of view
float fov = cam.fieldOfView;
//near clip plane distance
float near = cam.nearClipPlane;
//aspect ratio
float aspect = cam.aspect;
//half the height of the near plane
float halfHeight = near * Mathf.Tan(fov * 0.5f * Mathf.Deg2Rad);
//up and right vectors scaled to the near plane
Vector3 toRight = cam.transform.right * halfHeight * aspect;
Vector3 toTop = cam.transform.up * halfHeight;
//vectors from the camera to the four corners of the near plane
//depth / dist = near / |TL|
//=> dist = depth * (|TL| / near)
//=> scale = |TL| / near
Vector3 topLeft = camTransform.forward * near + toTop - toRight;
float scale = topLeft.magnitude / near;
topLeft.Normalize();
topLeft *= scale;
Vector3 topRight = camTransform.forward * near + toTop + toRight;
topRight.Normalize();
topRight *= scale;
Vector3 bottomLeft = camTransform.forward * near - toTop - toRight;
bottomLeft.Normalize();
bottomLeft *= scale;
Vector3 bottomRight = camTransform.forward * near - toTop + toRight;
bottomRight.Normalize();
bottomRight *= scale;
frustumCorners.SetRow(0, bottomLeft);
frustumCorners.SetRow(1, bottomRight);
frustumCorners.SetRow(3, topRight);
frustumCorners.SetRow(2, topLeft);
mat.SetMatrix("_FrustumCornorsRay", frustumCorners);
mat.SetInt("_RayMarchingStep", rayMarchingStep);
mat.SetFloat("_MaxRayLength", maxRayLength);
mat.SetFloat("_VolumetricLightIntensity", volumetricLightIntenstiy);
mat.SetFloat("_VolumetricShadowIntenstiy", volumetricShadowIntenstiy);
mat.SetFloat("_ScatteringFactor", lightScatteringFactor);
mat.SetFloat("_MinShadow", minShadow);
mat.SetFloat("_ShadowAttenuation", shadowAttenuation);
marchingRT = RenderTexture.GetTemporary(Screen.width >> downSample, Screen.height >> downSample, 0, source.format);
tempRT = RenderTexture.GetTemporary(Screen.width >> downSample, Screen.height >> downSample, 0, source.format);
//ray-march the light/shadow info (pass 0)
Graphics.Blit(source, marchingRT, mat, 0);
//blur the marched info (two ping-pong blur rounds, pass 1)
mat.SetVector("_Offsets", new Vector4(0, samplerScale, 0, 0));
Graphics.Blit(marchingRT, tempRT,mat,1);
mat.SetVector("_Offsets", new Vector4(samplerScale, 0, 0, 0));
Graphics.Blit(tempRT, marchingRT, mat, 1);
mat.SetVector("_Offsets", new Vector4(0, samplerScale, 0, 0));
Graphics.Blit(marchingRT, tempRT, mat, 1);
mat.SetVector("_Offsets", new Vector4(samplerScale, 0, 0, 0));
Graphics.Blit(tempRT, marchingRT, mat, 1);
//composite (pass 2)
mat.SetTexture("_MarchingTex", marchingRT);
Graphics.Blit(source, destination, mat, 2);
RenderTexture.ReleaseTemporary(marchingRT);
RenderTexture.ReleaseTemporary(tempRT);
}
//DitherMap shared by Guerrilla Games (a 4x4 Bayer matrix)
private Texture2D GenerateDitherMap()
{
int texSize = 4;
Texture2D ditherMap = new Texture2D(texSize, texSize, TextureFormat.Alpha8, false, true);
ditherMap.filterMode = FilterMode.Point;
Color32[] colors = new Color32[texSize * texSize];
colors[0] = GetDitherColor(0.0f);
colors[1] = GetDitherColor(8.0f);
colors[2] = GetDitherColor(2.0f);
colors[3] = GetDitherColor(10.0f);
colors[4] = GetDitherColor(12.0f);
colors[5] = GetDitherColor(4.0f);
colors[6] = GetDitherColor(14.0f);
colors[7] = GetDitherColor(6.0f);
colors[8] = GetDitherColor(3.0f);
colors[9] = GetDitherColor(11.0f);
colors[10] = GetDitherColor(1.0f);
colors[11] = GetDitherColor(9.0f);
colors[12] = GetDitherColor(15.0f);
colors[13] = GetDitherColor(7.0f);
colors[14] = GetDitherColor(13.0f);
colors[15] = GetDitherColor(5.0f);
ditherMap.SetPixels32(colors);
ditherMap.Apply();
return ditherMap;
}
private Color32 GetDitherColor(float value)
{
byte byteValue = (byte)(value / 16.0f * 255);
return new Color32(byteValue, byteValue, byteValue, byteValue);
}
}
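The shader below samples _ShadowMapTexture directly in its marching pass. One common way to make a directional light's shadowmap available to a post-process under the built-in pipeline (my own sketch, not part of the original article) is to copy it off with a CommandBuffer attached to the light:

using UnityEngine.Rendering;

//run once at startup; "sun" is assumed to be the directional light
CommandBuffer cb = new CommandBuffer { name = "GrabShadowMap" };
//bind the light's active shadowmap as a raw depth texture
cb.SetShadowSamplingMode(BuiltinRenderTextureType.CurrentActive, ShadowSamplingMode.RawDepth);
//expose it globally under the name the shader samples
cb.SetGlobalTexture("_ShadowMapTexture", BuiltinRenderTextureType.CurrentActive);
sun.AddCommandBuffer(LightEvent.AfterShadowMap, cb);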
Shader part:

Shader "Unlit/VolumetricShadow"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
//1.ray marching && get shadow info
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "AutoLight.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float4 interpolatedRay:TEXCOORD2;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float _ScatteringFactor;
float4x4 _FrustumCornorsRay;
sampler2D _CameraDepthTexture;
sampler2D _ShadowMapTexture;
sampler2D _DitherMap;
int _RayMarchingStep;
float _MaxRayLength;
float _VolumetricLightIntensity;
float _VolumetricShadowIntenstiy;
float _MinShadow;
float _ShadowAttenuation;
//remap x from [from1, to1] into [from2, to2]
float Remap(float x,float from1,float to1,float from2,float to2) {
return (x - from1) / (to1 - from1) * (to2 - from2) + from2;
}
//how lit the point is (0 = fully shadowed, 1 = fully lit)
float GetShadow(float3 worldPos) {
//compare depths in light space
float4 lightPos = mul(unity_WorldToShadow[0], float4(worldPos, 1));
float shadow = UNITY_SAMPLE_DEPTH(tex2Dlod(_ShadowMapTexture, float4(lightPos.xy,0,0)));
float depth = lightPos.z ;
float shadowValue = step(shadow, depth);
//attenuate the shadow by the depth difference
float dis = abs(depth - shadow);
shadowValue += clamp(Remap(dis, _ShadowAttenuation,0.1,0,1),0,1)*(1-shadowValue);
return shadowValue;
}
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
//pick the near-plane corner ray matching this vertex
int index = step(0.5, v.uv.x) + step(0.5, v.uv.y)*2;
//int index = 0;
/*if (v.uv.x < 0.5&&v.uv.y < 0.5)
{
index = 0;
}
else if (v.uv.x > 0.5&&v.uv.y < 0.5) {
index = 1;
}
else if (v.uv.x > 0.5&&v.uv.y > 0.5) {
index = 2;
}
else {
index = 3;
}*/
o.interpolatedRay = _FrustumCornorsRay[index];
return o;
}
fixed4 frag (v2f i) : SV_Target
{
//reconstruct the world position
float depthTextureValue = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv);
float linearEyeDepth = LinearEyeDepth(depthTextureValue);
//clamp the farthest reconstructed distance
linearEyeDepth = clamp(linearEyeDepth,0, _MaxRayLength);
float3 worldPos = _WorldSpaceCameraPos + linearEyeDepth * i.interpolatedRay.xyz;
float vShadow = 1;
float vLight = 0;
float3 rayOri = _WorldSpaceCameraPos;
float3 rayDir = i.interpolatedRay.xyz;
float disCam2World = length(worldPos - _WorldSpaceCameraPos);
//jitter the ray start with the dither map
float2 offsetUV = fmod(floor(i.vertex.xy), 4.0);
float ditherValue = tex2D(_DitherMap, offsetUV*0.25).a;
rayOri += ditherValue * rayDir;
//prevent the effect from appearing when facing away from the light
float3 toLight = normalize(_WorldSpaceLightPos0.xyz);
float dotLightRayDir = dot(toLight, rayDir)*0.5 + 0.5;
float scatteringLight = smoothstep(0.5, 1, dotLightRayDir);
float3 currentPos;
//fixed step count determines the step length
float marchStep = disCam2World / _RayMarchingStep;
UNITY_LOOP
for (int j = 0; j < _RayMarchingStep; j++)
{
currentPos = rayOri + i.interpolatedRay.xyz * marchStep * j;
float disCam2Current = length(currentPos- _WorldSpaceCameraPos);
//check whether the ray has marched past the scene depth
float outOfRange = step(disCam2Current, disCam2World);
//if (disCam2World>disCam2Current)
//{
float getShadow = GetShadow(currentPos);
vShadow -= (1- getShadow) * _VolumetricShadowIntenstiy
/ _RayMarchingStep * (j+3)/_RayMarchingStep * outOfRange;
vLight += getShadow * _VolumetricLightIntensity * scatteringLight / _RayMarchingStep * (j-3)/_RayMarchingStep * outOfRange;
//}
//else
//{
// break;
//}
}
vShadow = clamp(vShadow, _MinShadow, 1);
vLight = pow(clamp(vLight, 0, 1),_ScatteringFactor);
float4 col = float4(vLight, vShadow, 0, 1);
return col;
}
ENDCG
}
//2.blur
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float4 uv01 : TEXCOORD1;
float4 uv23 : TEXCOORD2;
float4 uv45 : TEXCOORD3;
};
sampler2D _MainTex;
float4 _MainTex_TexelSize;
float4 _Offsets;
v2f vert(appdata v) {
v2f o;
_Offsets *= _MainTex_TexelSize.xyxy;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
o.uv01 = v.uv.xyxy + _Offsets.xyxy*float4(1, 1, -1, -1);
o.uv23 = v.uv.xyxy + _Offsets.xyxy*float4(1, 1, -1, -1)*2.0;
o.uv45 = v.uv.xyxy + _Offsets.xyxy*float4(1, 1, -1, -1)*3.0;
return o;
}
float4 frag(v2f i) :SV_Target{
float4 color = float4(0,0,0,0);
color += 0.40*tex2D(_MainTex, i.uv);
color += 0.15*tex2D(_MainTex, i.uv01.xy);
color += 0.15*tex2D(_MainTex, i.uv01.zw);
color += 0.10*tex2D(_MainTex, i.uv23.xy);
color += 0.10*tex2D(_MainTex, i.uv23.zw);
color += 0.05*tex2D(_MainTex, i.uv45.xy);
color += 0.05*tex2D(_MainTex, i.uv45.zw);
return color;
}
ENDCG
}
//3.combine
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
};
sampler2D _MainTex;
sampler2D _MarchingTex;
v2f vert(appdata v) {
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
float4 frag(v2f i) :SV_Target{
float4 finalColor = 1;
float4 ori = tex2D(_MainTex,i.uv);
float4 marching = tex2D(_MarchingTex, i.uv);
finalColor.rgb = clamp(ori.rgb + marching.r*_LightColor0.rgb,0,1) * marching.g;
return finalColor;
}
ENDCG
}
}
}
In the real world, volumetric light is also known as Tyndall light, as pictured below.

There are many versions of volumetric light online, but most of them compare against the light's depth map as the reference. That makes volumetric light quick and easy to implement, but its fatal flaw is low precision: it completely fails to reproduce the hazy feel volumetric light should have. (See: https://blog.csdn.net/weixin_32938207/article/details/113035729)
Results: dusk https://www.zhihu.com/video/1529602677965553664 ; early morning https://www.zhihu.com/video/1529606105693220864 ; light simulation https://www.zhihu.com/video/1529834347015876608
The basic principle is simple. The usual online approach compares the light's depth map against the current world-space point: if the point lies beyond what the light depth map records, sample it and compute its lighting. By the same token, we replace the light's depth map with a custom sun depth texture. That may sound like a big leap, so the implementation is explained step by step below.
Physical principle of Tyndall light

What is the Tyndall effect? When a beam of light passes through a colloid, a bright "path" can be observed in the colloid from a direction perpendicular to the incident light; the cause is light scattered by the suspended colloid particles. This visible light path produced by colloid particles scattering the incident light is called the Tyndall effect. John Tyndall, the famous British physicist, discovered the effect in 1869 and was also the first scientist to explain why the sky is blue. When light strikes a dispersed system, only part of it passes through; the rest is reflected, scattered, or absorbed. The chemical composition of the system determines how much light is absorbed, while its degree of dispersion determines how much is scattered and reflected. When the particle diameter is larger than the incident wavelength, reflection and refraction dominate; when the light hits particles smaller than its wavelength, scattering dominates, and what we observe is light radiating around the particles in all directions, called scattered light or opalescence. The Tyndall effect is exactly this scattering (opalescence) phenomenon. From this we can see that to implement volumetric light, we must sample segment by segment along the light path in the view direction and gather each point's lighting information (absorption, scattering, reflection, and so on).

Custom sun
The first way to get a depth map is to grab it from a camera, but we have to watch the performance: generating a depth texture is a fairly expensive operation, so rather than going through depthTextureMode (which costs a lot), we allocate two RenderTextures ourselves to hold the depth and color buffers coming back from the GPU:
void Start()
{
colorRT = new RenderTexture(Screen.width, Screen.height, 24, RenderTextureFormat.Default);
depthRT = new RenderTexture(Screen.width, Screen.height, 0, RenderTextureFormat.Depth);
}
private void OnPreRender()
{
cam.SetTargetBuffers(colorRT.colorBuffer, depthRT.depthBuffer);
}

Extracting the frustum planes of the custom sun

To analytically intersect a ray with the surface of the light's projection frustum, we first need the plane equations of that frustum. Gribb et al. proposed a way to derive the six plane equations of a projection frustum directly from the projection matrix [5]; here is a brief sketch of the idea. Let V be the world-to-camera (view) matrix and P the projection matrix, and let M = PV be the camera's ViewProjection matrix. For any point p = (x, y, z, 1) in space, multiplying by M gives its clip-space coordinates, and the perspective divide then yields its NDC coordinates. Writing the four rows of M as m_1, m_2, m_3, m_4, i.e.

M = \begin{bmatrix} m_1 \\ m_2 \\ m_3 \\ m_4 \end{bmatrix},

the clip-space coordinates of p are

\begin{aligned} p_{clip} &= (x', y', z', w') \\ &= M \cdot p \\ &= (m_1 \cdot p,\ m_2 \cdot p,\ m_3 \cdot p,\ m_4 \cdot p) \end{aligned} \tag{10}

and its NDC coordinates follow from

p_{NDC} = \frac{p_{clip}}{w'} = \left(\frac{m_1 \cdot p}{m_4 \cdot p},\ \frac{m_2 \cdot p}{m_4 \cdot p},\ \frac{m_3 \cdot p}{m_4 \cdot p},\ 1\right) \tag{11}

Take the left clip plane as an example. In NDC its equation is x_{NDC} = -1, i.e.

\begin{aligned} & x_{NDC} = \frac{m_1 \cdot p}{m_4 \cdot p} = -1 \\ & \Rightarrow \quad m_1 \cdot p = -(m_4 \cdot p) \\ & \Rightarrow \quad (m_1 + m_4) \cdot p = 0 \end{aligned} \tag{12}

This is the vector form (A, B, C, D) \cdot (x, y, z, 1) = 0 of the standard plane equation Ax + By + Cz + D = 0, so the plane normal \vec{n} = (A, B, C) can be read off directly, though this derivation does not fix the sign (inward/outward direction) of the normal.
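As an illustration (my own sketch, not from the article; it assumes cam is the sun's projector Camera, and each Vector4 stores a plane as (A, B, C, D)):

Matrix4x4 M = cam.projectionMatrix * cam.worldToCameraMatrix; //M = P * V
Vector4 m1 = M.GetRow(0), m2 = M.GetRow(1), m3 = M.GetRow(2), m4 = M.GetRow(3);
Vector4 left   = m4 + m1;  //x_ndc >= -1
Vector4 right  = m4 - m1;  //x_ndc <=  1
Vector4 bottom = m4 + m2;  //y_ndc >= -1
Vector4 top    = m4 - m2;  //y_ndc <=  1
Vector4 near   = m4 + m3;  //z_ndc >= -1 (OpenGL-style clip; for D3D-style 0..1 depth the near plane is just m3)
Vector4 far    = m4 - m3;  //z_ndc <=  1

Unity's built-in GeometryUtility.CalculateFrustumPlanes(cam) computes essentially the same six planes for a Camera.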
Sampling frequency

Sampling frequency is a major performance metric; it directly affects the game's frame rate. Every sample along the view-direction light path involves matrix transforms, depth map fetches, light attenuation and intensity computations, so the obvious idea is to take fewer samples along the path, but severe banding artifacts follow (the result of 5 samples is shown above). Can we use blur to soften the banding caused by undersampling?

Blue Noise

Blue noise is a high-frequency random noise, but it cannot be generated in real time. Developer Christoph Peters covers blue noise in depth on his blog, where you can also find the Blue Noise Textures he generated in various formats and sizes (CC0 licensed): http://momentsingraphics.de/BlueNoise.html. There is also an approximate method that obtains high-frequency blue noise by stripping the low-frequency components out of white noise, and it can generate noise of other colors as well (https://blog.demofox.org/2017/1

Unfortunately, after jittering the sampling with noise (a minimal jitter sketch follows below), the banding does disappear, but what replaces it is black-and-white graininess, as shown above. So post-processing is needed at this point; for the atmospheric blur effect see https://blog.csdn.net/weixin_51327051/article/details/123031360. The final result after jitter and blur is shown above.
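A minimal sketch of the jitter itself (my own illustration; _BlueNoise is a hypothetical 64x64 tiled blue-noise texture bound to the shader, with rayOri, rayDir and marchStep named as in the VolumetricShadow shader earlier):

//sample the tiled blue-noise texture by screen pixel for a per-pixel value in [0,1)
float jitter = tex2D(_BlueNoise, i.vertex.xy / 64.0).r;
//shift the ray start by up to one step so banding breaks up into high-frequency noise
rayOri += rayDir * marchStep * jitter;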
Summary

The performance cost is still real; after all, this strives toward real-world rendering quality, and the optimization work of cleaning up after the hardware nearly drove me crazy (endlessly trading off between performance and visual quality).
