Blur

URPLearn/Blur

Basic Concepts

Image processing has a fundamental concept: the convolution kernel.
A kernel is a matrix. For a 3x3 kernel, 3 is called the kernel size. Applying the kernel to the pixel at (x, y) is equivalent to sampling the 3x3 neighborhood around (x, y) and summing those pixels, each weighted by the corresponding kernel entry (call the nine entries a through i).
Different blur algorithms are, in essence, just different choices of kernel.
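As a minimal sketch of what "applying a kernel" means (illustrative only; Convolve3x3 and its kernel parameter are not part of the project's Blur.hlsl):

// Illustrative: apply an arbitrary 3x3 kernel at integer pixel coordinates.
// kernel[0..8] holds the nine weights a..i in row-major order.
half4 Convolve3x3(Texture2D tex, int2 pixelCoord, float kernel[9])
{
    half4 color = half4(0, 0, 0, 0);
    int k = 0;
    for (int j = -1; j <= 1; j++)
    {
        for (int i = -1; i <= 1; i++)
        {
            // LOAD_TEXTURE2D_X fetches a single texel, the same macro the BoxBlur code below uses
            color += LOAD_TEXTURE2D_X(tex, pixelCoord + int2(i, j)) * kernel[k++];
        }
    }
    return color;
}

A box blur is the special case where all nine weights equal 1/9; a Gaussian blur weights the center more heavily.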

Box Blur (Mean Blur)

Mean blur: take every pixel inside a (size * size) window, sum them, and divide by the number of pixels.

Cost: n * n samples per pixel (where n is the kernel size)
half4 BoxBlur(Texture2D tex, float2 pixelCoord, float halfKernelSize){
    half4 color = half4(0,0,0,1);
    int kernelSize = 2 * halfKernelSize + 1;
    // every texel in the window gets the same weight: 1 / (kernelSize * kernelSize)
    float weight = rcp(kernelSize * kernelSize);
    for(int i = -halfKernelSize; i <= halfKernelSize; i++){
        for(int j = -halfKernelSize; j <= halfKernelSize; j++){
            color += LOAD_TEXTURE2D_X(tex, pixelCoord + float2(i, j)) * weight;
        }
    }
    return color;
}

Cost: 2 * n samples per pixel, split across two separable passes (horizontal, then vertical), each with n taps.
half4 BoxBlur(Texture2D tex, float2 pixelCoord, float halfKernelSize, float2 offset) {
    half4 color = half4(0, 0, 0, 1);
    float weight = rcp(2 * halfKernelSize + 1);
    for (int i = -halfKernelSize; i <= halfKernelSize; i++) {
        color += LOAD_TEXTURE2D_X(tex, pixelCoord + offset * i) * weight;
    }
    return color;
}
// horizontal pass
half4 BoxBlurH(Texture2D tex, float2 pixelCoord, int halfKernelSize, float radiusScale) {
    return BoxBlur(tex, pixelCoord, halfKernelSize, float2(radiusScale, 0));
}
// vertical pass
half4 BoxBlurV(Texture2D tex, float2 pixelCoord, int halfKernelSize, float radiusScale) {
    return BoxBlur(tex, pixelCoord, halfKernelSize, float2(0, radiusScale));
}
Cost: halfKernelSize + 1 samples per pass (roughly half the kernel size). Idea: step outward from the center, taking one sample on each side per loop iteration, and place each sample between two texels so the hardware's bilinear filter averages them in a single fetch.
half4 BoxBlurBilinear(Texture2D tex, sampler linearSampler, float2 uv, int halfKernelSize, float2 offset) {
    half4 color = half4(0, 0, 0, 1);
    float weight = rcp(halfKernelSize * 2 + 1);
    if (halfKernelSize % 2 == 0) { // even: one center tap, then pairs of texels covered by single fetches
        color += SAMPLE_TEXTURE2D_X(tex, linearSampler, uv) * weight;
        int quartKernelSize = floor(halfKernelSize / 2);
        for (int i = 1; i <= quartKernelSize; i++) {
            // sampling halfway between two texels lets the linear filter average them in one fetch
            float uvOffset = (i * 2 - 0.5);
            color += SAMPLE_TEXTURE2D_X(tex, linearSampler, uv + offset * uvOffset) * 2 * weight;
            color += SAMPLE_TEXTURE2D_X(tex, linearSampler, uv - offset * uvOffset) * 2 * weight;
        }
    }
    else { // odd: the center texel is covered (approximately) by the two 0.75-offset taps
        color += SAMPLE_TEXTURE2D_X(tex, linearSampler, uv + 0.75 * offset) * 1.5 * weight;
        color += SAMPLE_TEXTURE2D_X(tex, linearSampler, uv - 0.75 * offset) * 1.5 * weight;
        int quartKernelSize = floor((halfKernelSize - 1) / 2);
        for (int i = 1; i <= quartKernelSize; i++) {
            float uvOffset = (i * 2 + 0.5);
            color += SAMPLE_TEXTURE2D_X(tex, linearSampler, uv + offset * uvOffset) * 2 * weight;
            color += SAMPLE_TEXTURE2D_X(tex, linearSampler, uv - offset * uvOffset) * 2 * weight;
        }
    }
    return color;
}

#define BOX_BLUR_BILINEAR(tex,uv,halfKernelSize,offset) BoxBlurBilinear(tex,sampler_LinearClamp,uv,halfKernelSize,offset)

// Horizontal pass: offset = (1 / textureWidth, 0)
float4 FragH(Varyings i) : SV_Target
{
    #if _BilinearMode
    return BOX_BLUR_BILINEAR(_MainTex,i.uv,_KernelSize,float2(_BlurScale,0) * _MainTex_TexelSize.xy);
    #else
    return BoxBlur(_MainTex,i.uv * _MainTex_TexelSize.zw,_KernelSize,float2(_BlurScale,0));
    #endif
}
// Vertical pass: offset = (0, 1 / textureHeight)
float4 FragV(Varyings i) : SV_Target
{
    #if _BilinearMode
    return BOX_BLUR_BILINEAR(_MainTex,i.uv,_KernelSize,float2(0,_BlurScale) * _MainTex_TexelSize.xy);
    #else
    return BoxBlur(_MainTex,i.uv * _MainTex_TexelSize.zw,_KernelSize,float2(0,_BlurScale));
    #endif
}

Gaussian Blur

Unlike mean blur, Gaussian blur weights the surrounding pixels using a normal (Gaussian) distribution.
There is a website that computes Gaussian convolution kernels: gaussian-kernel-calculator
A Gaussian kernel is determined by two parameters: sigma and kernelSize.
kernelSize was described above; sigma is the standard deviation in the normal-distribution formula. The smaller sigma is, the sharper the bell curve; the larger it is, the flatter the curve.
Therefore, for a fixed kernelSize, a larger sigma produces a blurrier result.
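For reference, here is a hedged sketch (GaussianWeight and GaussianBlurDynamic are illustrative names, not functions from the project's Blur.hlsl) showing how the weights come out of the normal-distribution formula and why they are normalized. In practice the weights are precomputed and hard-coded, as in the 7-tap kernels below, since evaluating exp per tap per pixel is wasteful. Note that kernel calculators such as the site linked above may discretize the Gaussian slightly differently (for example by integrating over each texel), so their constants need not match this point-sampled version exactly.

// Unnormalized 1D Gaussian weight for a tap x texels away from the center.
float GaussianWeight(float x, float sigma)
{
    return exp(-(x * x) / (2.0 * sigma * sigma));
}

// Illustrative separable Gaussian blur with runtime sigma / kernel size.
// Each weight is divided by the sum so the truncated kernel still sums to 1.
half4 GaussianBlurDynamic(Texture2D tex, float2 pixelCoord, float2 offset, int halfKernelSize, float sigma)
{
    float sum = 0;
    for (int i = -halfKernelSize; i <= halfKernelSize; i++)
    {
        sum += GaussianWeight(i, sigma);
    }
    half4 color = half4(0, 0, 0, 0);
    for (int j = -halfKernelSize; j <= halfKernelSize; j++)
    {
        color += LOAD_TEXTURE2D_X(tex, pixelCoord + offset * j) * (GaussianWeight(j, sigma) / sum);
    }
    return color;
}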

/// kernel size = 7, sigma = 1
half4 GaussianBlur7Tap(Texture2D tex, float2 pixelCoord, float2 offset) {
    half4 color = half4(0, 0, 0, 0);
    color += 0.383103 * LOAD_TEXTURE2D_X(tex, pixelCoord);
    color += 0.241843 * LOAD_TEXTURE2D_X(tex, pixelCoord + offset);
    color += 0.241843 * LOAD_TEXTURE2D_X(tex, pixelCoord - offset);
    color += 0.060626 * LOAD_TEXTURE2D_X(tex, pixelCoord + offset * 2);
    color += 0.060626 * LOAD_TEXTURE2D_X(tex, pixelCoord - offset * 2);
    color += 0.00598 * LOAD_TEXTURE2D_X(tex, pixelCoord + offset * 3);
    color += 0.00598 * LOAD_TEXTURE2D_X(tex, pixelCoord - offset * 3);
    return color;
}
The bilinear variant below is less obvious: by offsetting each sample so it falls between two texels, the hardware's linear filter returns a weighted blend of both, so a 7-tap Gaussian needs only 4 fetches.
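The constants can be derived from the 7-tap weights above. Each inner tap merges half of the center weight with the neighboring tap, and the two outer taps merge with each other:

w1 = 0.383103 / 2 + 0.241843 = 0.4333945
offset1 = 0.241843 / 0.4333945 ≈ 0.558020

w2 = 0.060626 + 0.00598 = 0.066606
offset2 = 2 + 0.00598 / 0.066606 ≈ 2.089782

A linear sample at uv + offset1 * texel returns (1 - 0.558020) * c0 + 0.558020 * c1; scaling it by w1 gives 0.5 * 0.383103 * c0 + 0.241843 * c1, which is exactly half of the center tap plus the first side tap. The same reasoning gives the outer pair, so four bilinear fetches reproduce the full 7-tap kernel.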
/// kernel size = 7, sigma = 1
half4 GaussianBlur7TapBilinear(Texture2D tex, sampler texSampler, float2 uv, float2 offset) {
    half4 color = half4(0, 0, 0, 0);
    color += 0.4333945 * SAMPLE_TEXTURE2D_X(tex, texSampler, uv + offset * 0.558020);
    color += 0.4333945 * SAMPLE_TEXTURE2D_X(tex, texSampler, uv - offset * 0.558020);
    color += 0.066606 * SAMPLE_TEXTURE2D_X(tex, texSampler, uv + offset * 2.089782);
    color += 0.066606 * SAMPLE_TEXTURE2D_X(tex, texSampler, uv - offset * 2.089782);
    return color;
}
#define GAUSSIAN_BLUR_7TAP_BILINEAR(tex,uv,offset) GaussianBlur7TapBilinear(tex,sampler_LinearClamp,uv,offset)

Bloom (Full-Screen Glow)

Basic Theory

Make pixels above a certain brightness even brighter and let that brightness bleed into the surrounding area.
Implementation outline (a minimal sketch of step 1 follows the list):

  1. Filter the source RT: for example, write out pixels whose color exceeds a threshold and discard the rest, producing the bloom RT.
  2. Blur the bloom RT (this spreads the bright regions outward).
  3. Composite the bloom RT back onto the source RT.
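A minimal sketch of step 1 (BrightPass is an illustrative name; the project's actual version is the FragGetLight pass in the shader further below), assuming the threshold and intensity come from material properties:

// Keep only pixels brighter than the threshold; everything darker contributes nothing.
float4 BrightPass(float4 color, float threshold, float intensity)
{
    // Rec.601 luma weights; any weights that sum to 1 would do
    float luminance = dot(float3(0.299, 0.587, 0.114), color.rgb);
    return color * clamp(luminance - threshold, 0, 1) * intensity;
}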

Code

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;

namespace URPLearn
{
    /// <summary>
    /// Full-screen bloom
    /// 1. Downsample
    /// 2. Gaussian blur
    /// 3. Additive composite
    /// Parameters: threshold, intensity, scatter
    /// </summary>
    [CreateAssetMenu(menuName = "URPLearn/Bloom")]
    public class Bloom : PostProcessingEffect
    {
        /// <summary>
        /// Shader
        /// </summary>
        public Shader shader;

        /// <summary>
        /// Brightness threshold
        /// </summary>
        [Range(0, 1)]
        public float threshold;

        /// <summary>
        /// Downsample factor
        /// </summary>
        [Range(1, 4)]
        public int downSample = 1;

        /// <summary>
        /// Blur iterations
        /// </summary>
        [Range(1, 10)]
        public int blurIterations = 1;

        /// <summary>
        /// Intensity
        /// </summary>
        public float intensity;

        /// <summary>
        /// Scatter
        /// </summary>
        public float scatter;

        /// <summary>
        /// Tint color
        /// </summary>
        public Color tint;

        /// <summary>
        /// Material
        /// </summary>
        private Material _material;

        /// <summary>
        /// Blur helper
        /// </summary>
        private BlurBlitter _blurBlitter = new BlurBlitter();

        public override void Render(CommandBuffer cmd, ref RenderingData renderingData, PostProcessingRenderContext context)
        {
            if (!shader)
            {
                return;
            }
            if (_material == null)
            {
                _material = new Material(shader);
            }

            _material.SetFloat("_Threshold", threshold);
            _material.SetColor("_Tint", tint);
            _material.SetFloat("_Intensity", intensity);

            var descriptor = context.sourceRenderTextureDescriptor;

            var temp1 = context.GetTemporaryRT(cmd, descriptor, FilterMode.Bilinear);

            // first pass: extract the bright areas
            cmd.Blit(context.activeRenderTarget, temp1, _material, 0);

            // blur (spread the bright areas outward)
            _blurBlitter.SetSource(temp1, descriptor);

            _blurBlitter.downSample = downSample;
            _blurBlitter.iteratorCount = blurIterations;
            _blurBlitter.blurType = BlurType.Box;

            _blurBlitter.Render(cmd);

            cmd.SetGlobalTexture("_BloomTex", temp1);

            // combine with the source
            context.BlitAndSwap(cmd, _material, 3);

            context.ReleaseTemporaryRT(cmd, temp1);
        }
    }
}


Shader "URPLearn/PostProcessing/Bloom"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
ZTest Always ZWrite Off Cull Off
Tags { "RenderType" = "Opaque" "RenderPipeline" = "UniversalPipeline"}
HLSLINCLUDE
// 宏定义 Material.EnableKeyword
#pragma shader_feature _BloomDebug

#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Filtering.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Input.hlsl"
#include "../Blur/Blur.hlsl"

// 源RT
TEXTURE2D_X(_MainTex);
SAMPLER(sampler_MainTex);
// BloomRT
TEXTURE2D_X(_BloomTex);
SAMPLER(sampler_BloomTex);

CBUFFER_START(UnityPerMaterial)
float4 _MainTex_TexelSize;
float4 _Tint;
float _Threshold;
float _Intensity;
float _KernelSize;
float _BlurScale;
CBUFFER_END

struct Attributes
{
float4 positionOS : POSITION;
float2 uv : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};

struct Varyings
{
float4 positionHS : SV_POSITION;
float2 uv : TEXCOORD0;
UNITY_VERTEX_OUTPUT_STEREO
};

float4 SampleColor(float2 uv) {
return SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, uv);
}

Varyings Vert(Attributes input)
{
Varyings output;
UNITY_SETUP_INSTANCE_ID(input); //为支持GPUInstance?
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(output); //將output变量初始化
output.positionHS = TransformObjectToHClip(input.positionOS); //模型空间变化到齐次裁剪空间
output.uv = input.uv;
return output;
}

/// 通过阈值获取高亮区域
float4 FragGetLight(Varyings i) :SV_Target{
float4 color = SampleColor(i.uv);
float luminance = dot(float3(0.299,0.587,0.114),color.rgb);
return color * clamp(luminance - _Threshold, 0, 1) * _Intensity;
}

///水平blur
float4 FragBlurH(Varyings i) : SV_Target
{
return GaussianBlur7Tap(_MainTex,i.uv * _MainTex_TexelSize.zw, float2(_BlurScale,0));
}

//垂直blur
float4 FragBlurV(Varyings i) : SV_Target
{
return GaussianBlur7Tap(_MainTex,i.uv * _MainTex_TexelSize.zw, float2(0,_BlurScale));
}

//颜色叠加
float4 Bloom(Varyings i) : SV_Target
{
return SampleColor(i.uv) + SAMPLE_TEXTURE2D_X(_BloomTex, sampler_BloomTex, i.uv) * _Tint;
}
ENDHLSL

Pass{
HLSLPROGRAM

#pragma vertex Vert
#pragma fragment FragGetLight

ENDHLSL
}

Pass{
HLSLPROGRAM

#pragma vertex Vert
#pragma fragment FragBlurH

ENDHLSL
}

Pass{
HLSLPROGRAM

#pragma vertex Vert
#pragma fragment FragBlurV

ENDHLSL
}

Pass{
HLSLPROGRAM

#pragma vertex Vert
#pragma fragment Bloom

ENDHLSL
}
}
}

Summary

  1. float luminance = dot(float3(0.299,0.587,0.114),color.rgb); // computes the pixel's luminance; the weights can be chosen freely as long as they sum to 1

Depth Of Field

Depth Of Field

Theory

The depth-of-field effect fundamentally comes from the camera's focus and defocus behaviour, and the optics behind it is lens imaging.
The Gaussian imaging equation for a convex (thin) lens is:
1/f = 1/u + 1/v
f: focal length - a property of the lens itself
u: object distance - distance from the object to the lens
v: image distance - distance from the lens to where the object's image is formed behind it

When the image formed by the lens falls exactly on the film plane, we get a sharp picture. The further the image plane is from the film, the blurrier the result.

Putting the formulas together

This corresponds to URP's DOF Mode = Bokeh.
Input parameters:

focalLength - distance from the film to the lens (film distance)
focusDistance - focus distance (object distance of the in-focus plane)
aperture - the f-number (defined as lens focal length / lens diameter)
Notation:

rcp denotes the reciprocal (1/x)
Then:

Focal length of the lens:

f = rcp(rcp(focalLength) + rcp(focusDistance))
Lens diameter:

lensDiam = f * rcp(aperture)
Image distance from object distance:

Input:
objDis
Output:
imageDis = rcp(rcp(f) - rcp(objDis));
Circle-of-confusion (CoC) diameter from object distance:

Input:
objDis

Output:
imageDis = CalculateImageDistance(objDis);
CoC = abs(imageDis - focalLength) * lensDiam / focalLength;

Object distance from scene depth:

Input:
depth
focalLength
Output:
objDis = depth - focalLength
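
A worked numeric example (purely illustrative values, not taken from the project): with focalLength = 50 mm, focusDistance = 2 m = 2000 mm and aperture = 2.8:

f = rcp(1/50 + 1/2000) ≈ 48.78 mm
lensDiam = 48.78 / 2.8 ≈ 17.42 mm

For an object at objDis = 1000 mm:

imageDis = rcp(1/48.78 - 1/1000) ≈ 51.28 mm
CoC = abs(51.28 - 50) * 17.42 / 50 ≈ 0.45 mm

So an object 1 m in front of a camera focused at 2 m produces a blur circle of roughly 0.45 mm; the fragment shader below uses this diameter directly to scale the box-blur offsets.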

Code

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;

namespace URPLearn
{
    /// <summary>
    /// Depth of field
    /// Corresponds to the Bokeh mode of URP's built-in DOF
    /// </summary>
    [CreateAssetMenu(menuName = "URPLearn/DepthOfField")]
    public class DepthOfField : PostProcessingEffect
    {
        [Tooltip("Object distance the camera focuses at, in meters; denoted u in the formulas")]
        [SerializeField]
        private float _focusDistance = 1;

        [Tooltip("Camera focal length (really the film-to-lens distance here), in millimeters; denoted v in the formulas")]
        [SerializeField]
        private float _focalLength;

        [Tooltip("Aperture f-number: F = f / lens diameter")]
        [SerializeField]
        private float _aperture = 6.3f;

        [Tooltip("Number of blur iterations; affects performance")]
        [SerializeField]
        private int _blurIteratorCount = 1;

        [SerializeField]
        private Shader _shader;

        private Material _material;

        private void OnValidate()
        {
            _aperture = Mathf.Clamp(_aperture, 1, 32);
            _focalLength = Mathf.Clamp(_focalLength, 1, 300);
            _focusDistance = Mathf.Max(_focusDistance, 0.1f);
            _blurIteratorCount = Mathf.Clamp(_blurIteratorCount, 1, 5);
        }

        /// <summary>
        /// Reciprocal of the lens focal length (1/f, in 1/mm); _focusDistance is in meters, hence the 0.001 factor
        /// </summary>
        private float rcpf
        {
            get
            {
                return (0.001f / _focusDistance + 1 / _focalLength);
            }
        }

        /// <summary>
        /// Image distance for a given object distance
        /// </summary>
        private float CalculateImageDistance(float objDis)
        {
            return 1 / (rcpf - 0.001f / objDis);
        }

        /// <summary>
        /// Circle-of-confusion diameter for a given object distance
        /// </summary>
        private float CalculateConfusionCircleDiam(float objDis)
        {
            var imageDis = CalculateImageDistance(objDis);
            return Mathf.Abs(imageDis - _focalLength) / (_focalLength * rcpf * _aperture);
        }

        /// <summary>
        /// Lens (aperture) diameter
        /// </summary>
        private float apertureDiam
        {
            get
            {
                return (1 / (rcpf * _aperture));
            }
        }

        public override void Render(CommandBuffer cmd, ref RenderingData renderingData, PostProcessingRenderContext context)
        {
            if (_shader == null)
            {
                return;
            }
            if (_material == null)
            {
                _material = new Material(_shader);
            }

            var DOFParams = new Vector4(
                rcpf,
                _focalLength,
                1 / (_focalLength * rcpf * _aperture),
                0
            );
            _material.SetVector("_DOFParams", DOFParams);

            for (int i = 0; i < _blurIteratorCount; i++)
            {
                context.BlitAndSwap(cmd, _material, 0);
                context.BlitAndSwap(cmd, _material, 1);
            }
        }
    }
}


Shader "URPLearn/PostProcessing/DepthOfField"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
}
SubShader
{
ZTest Always ZWrite Off Cull Off
Tags { "RenderType" = "Opaque" "RenderPipeline" = "UniversalPipeline"}
HLSLINCLUDE

#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Filtering.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Input.hlsl"
#include "../Blur/Blur.hlsl"

TEXTURE2D_X(_MainTex);
TEXTURE2D_X_FLOAT(_CameraDepthTexture);

CBUFFER_START(UnityPerMaterial)
float4 _MainTex_TexelSize;
float4 _DOFParams;
CBUFFER_END

#define rcpF _DOFParams.x
#define focalLength _DOFParams.y
#define rcpFFA _DOFParams.z // rcp(_focalLength * rcpf * _aperture)

struct Attributes
{
float4 positionOS : POSITION;
float2 uv : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};

struct Varyings
{
float4 positionHS : SV_POSITION;
float2 uv : TEXCOORD0;
UNITY_VERTEX_OUTPUT_STEREO
};

Varyings Vert(Attributes input)
{
Varyings output;
UNITY_SETUP_INSTANCE_ID(input); //为支持GPUInstance?
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(output); //將output变量初始化
output.positionHS = TransformObjectToHClip(input.positionOS); //模型空间变化到齐次裁剪空间
output.uv = input.uv;
return output;
}

// 采样深度
float SampleDepth(float2 uv) {
return LOAD_TEXTURE2D_X(_CameraDepthTexture, _MainTex_TexelSize.zw * uv).x;
}

// 线性深度
float SampleEyeLinearDepth(float2 uv) {
return LinearEyeDepth(SampleDepth(uv), _ZBufferParams);
}

//计算像距
float4 CalculateImageDistance(float objDis) {
return rcp(rcpF - rcp(objDis));
}

//弥散圆直径
float CalculateConfusionCircleDiam(float objDis) {
float imageDis = CalculateImageDistance(objDis);
return abs(imageDis - focalLength) * rcpFFA;
}

float CalculateBlurFactor(float2 uv)
{
float depth = SampleEyeLinearDepth(uv); // Depth大小为m
float objDis = 1000 * depth - focalLength; // 传入的focalLength为mm
float diam = CalculateConfusionCircleDiam(objDis);
return diam;
}

float4 FragH(Varyings i) :SV_Target{
float factor = CalculateBlurFactor(i.uv);
return BoxBlur(_MainTex, i.uv * _MainTex_TexelSize.zw, 2, float2(factor, 0));
}

float4 FragV(Varyings i) : SV_Target{
float factor = CalculateBlurFactor(i.uv);
return BoxBlur(_MainTex, i.uv * _MainTex_TexelSize.zw, 2, float2(0, factor));
}

ENDHLSL

Pass
{
HLSLPROGRAM
#pragma vertex Vert
#pragma fragment FragH
ENDHLSL
}
Pass
{
HLSLPROGRAM
#pragma vertex Vert
#pragma fragment FragV
ENDHLSL
}
}
}

Summary

Before implementing an effect, first understand the principle behind it; split the big problem into smaller ones and tackle them one by one.
For now I don't fully understand the underlying optics and mostly just plug in the formulas.

SSAO (Screen-Space Ambient Occlusion)

Theory

Code

Summary