Now that I finally have some time, let's write up the dynamic terrain shading part.
We already implemented the mesh construction and control part earlier, but the shading is clearly wrong, as shown below:
You can tell at a glance that the normals are incorrect, because when we built the flat grid we gave every vertex the same normal:
normals[index] = new Vector3(0, 1, 0);
So we need to recalculate the normals after the vertices have been displaced. The recalculation can be done either in C# or in the shader; here we do it in C#.
First, let's rebuild the mesh in C#, as follows:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
[RequireComponent(typeof(MeshRenderer))]
[RequireComponent(typeof(MeshFilter))]
public class GroundPanelMesh : MonoBehaviour
{
public Texture2D noiseTex;
[Range(1f, 100f)]
public float heightInten = 1f;
public int cellCount = 50;
public float cellWidth = 5f;
private MeshRenderer meshRender;
private MeshFilter meshFilter;
private Mesh mesh;
private void Awake()
{
meshRender = GetComponent<MeshRenderer>();
meshFilter = GetComponent<MeshFilter>();
mesh = new Mesh();
}
void Start()
{
}
void Update()
{
}
public void CreateMesh()
{
if (mesh != null)
{
mesh.Clear();
}
int cellVertexCount = cellCount + 1;
Vector3[] vertices = new Vector3[cellVertexCount * cellVertexCount];
Vector3[] normals = new Vector3[cellVertexCount * cellVertexCount];
Vector2[] uvs = new Vector2[cellVertexCount * cellVertexCount];
int[] triangles = new int[cellCount * cellCount * 6];
int triindex = 0;
Vector3 halfbias = new Vector3(cellCount * cellWidth / 2f, 0, cellCount * cellWidth / 2f);
//scan row by row
//generate the grid centered on the origin
for (int y = 0; y <= cellCount; y++)
{
for (int x = 0; x <= cellCount; x++)
{
int index = cellVertexCount * y + x;
vertices[index] = new Vector3(x * cellWidth, 0, y * cellWidth) - halfbias;
normals[index] = new Vector3(0, 1, 0);
uvs[index] = new Vector2((float)x / (float)cellCount, (float)y / (float)cellCount);
if (x < cellCount && y < cellCount)
{
int topindex = x + y * cellVertexCount;
int bottomindex = x + (y + 1) * cellVertexCount;
triangles[triindex + 5] = topindex;
triangles[triindex + 4] = topindex + 1;
triangles[triindex + 3] = bottomindex + 1;
triangles[triindex + 2] = topindex;
triangles[triindex + 1] = bottomindex + 1;
triangles[triindex] = bottomindex;
triindex += 6;
}
}
}
mesh.vertices = vertices;
mesh.normals = normals;
mesh.triangles = triangles;
mesh.uv = uvs;
meshFilter.sharedMesh = mesh;
}
/// <summary>
/// Generate the mesh from the noise height map
/// </summary>
public void CreateMeshWithNoise()
{
if (mesh != null)
{
mesh.Clear();
}
int cellVertexCount = cellCount + 1;
Vector3[] vertices = new Vector3[cellVertexCount * cellVertexCount];
Vector2[] uvs = new Vector2[cellVertexCount * cellVertexCount];
int[] triangles = new int[cellCount * cellCount * 6];
int triindex = 0;
Vector3 halfbias = new Vector3(cellCount * cellWidth / 2f, 0, cellCount * cellWidth / 2f);
int texwid = noiseTex.width;
int texhei = noiseTex.height;
for (int y = 0; y <= cellCount; y++)
{
for (int x = 0; x <= cellCount; x++)
{
int index = cellVertexCount * y + x;
vertices[index] = new Vector3(x * cellWidth, 0, y * cellWidth) - halfbias;
//sample the noise texture and compute the height offset
int px = (int)((float)x / (float)cellCount * texwid);
int py = (int)((float)y / (float)cellCount * texhei);
Color col = noiseTex.GetPixel(px, py);
float r = col.r;
Vector3 vh = new Vector3(0, r * heightInten, 0);
vertices[index] += vh;
uvs[index] = new Vector2((float)x / (float)cellCount, (float)y / (float)cellCount);
if (x < cellCount && y < cellCount)
{
int topindex = x + y * cellVertexCount;
int bottomindex = x + (y + 1) * cellVertexCount;
triangles[triindex + 5] = topindex;
triangles[triindex + 4] = topindex + 1;
triangles[triindex + 3] = bottomindex + 1;
triangles[triindex + 2] = topindex;
triangles[triindex + 1] = bottomindex + 1;
triangles[triindex] = bottomindex;
triindex += 6;
}
}
}
mesh.vertices = vertices;
mesh.triangles = triangles;
mesh.uv = uvs;
//recalculate the normals
mesh.RecalculateNormals();
meshFilter.sharedMesh = mesh;
}
}
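To drive this in the editor without extra plumbing, a minimal custom inspector can expose the two methods as buttons (a sketch only; the article's own tooling isn't shown, and since the Mesh is created in Awake these buttons assume Play mode):
using UnityEngine;
using UnityEditor;
//Hypothetical inspector buttons for GroundPanelMesh; place this file under an Editor folder.
[CustomEditor(typeof(GroundPanelMesh))]
public class GroundPanelMeshEditor : Editor
{
    public override void OnInspectorGUI()
    {
        DrawDefaultInspector();
        GroundPanelMesh ground = (GroundPanelMesh)target;
        //mesh is created in Awake, so these calls assume Play mode
        if (GUILayout.Button("CreateMesh"))
        {
            ground.CreateMesh();
        }
        if (GUILayout.Button("CreateMeshWithNoise"))
        {
            ground.CreateMeshWithNoise();
        }
    }
}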
With that, we have a function that builds the mesh vertices from a noise texture and recalculates the normals; the result looks like this:
Lighting based on the normals is now fixed. However, handling the vertices and normals in C# causes a problem, as shown below:
Tessellation "stops working". Of course it isn't literally broken: if the noise height map is applied in C# rather than sampled in the shader's vert function, the newly tessellated vertices can no longer be displaced by the height map, so in practice the tessellation feature is useless.
The best approach is therefore to move both the noise-height displacement and the normal calculation into the shader. As I said before, I'm targeting PC; if you need to run on mobile, stopping at the C# version is basically enough and tessellation isn't needed anyway, since embedded GPUs may not even support that feature.
That means we need a tool that generates a normal map for the mesh, as follows:
On the left is the n*m normal map we want to generate; on the right is the mesh UV data UV1-UVn. The pixel count is of course greater than or equal to the UV count, so we have a mapping problem to solve:
P(x,y) = UV(?)
That is, how does an arbitrary pixel of the normal map relate to the mesh UV data?
My solution is to compute the Pn->UV mapping through the mesh's triangle topology, as follows:
Pixel Pn falls inside one of the mesh's triangles, and working out Pn's UV mapping also gives us the formula for its normal, as follows:
Intersecting the NbNp ray with the NaNc segment gives N1, from which Np's normal can be derived; the core task is therefore a 2D ray-segment intersection.
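Written out (a sketch of the interpolation the code below implements; $A$, $B$, $C$ are the triangle's UV vertices with normals $n_A$, $n_B$, $n_C$, and $P$ is the pixel's UV at $(x/W,\ y/H)$ for a $W \times H$ texture):

$$N_1 = \mathrm{line}(B,P)\ \cap\ \mathrm{line}(A,C)$$
$$n_{N_1} = n_A + (n_C - n_A)\,\frac{|AN_1|}{|AN_1|+|N_1C|}$$
$$n_P = n_{N_1} + (n_B - n_{N_1})\,\frac{|N_1P|}{|N_1P|+|PB|}$$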
With the analysis done, let's write the code:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEditor;
using System.IO;
public class EditorMeshNormalTextureGenerator : EditorWindow
{
private string texWidString;
private int texWidth;
private string texHeiString;
private int texHeight;
private string texName;
private Transform selectTrans;
private MeshFilter meshFilter;
private Mesh mesh;
private Texture2D nmTex;
private Color[] pix;
private int[] triangles;
private Vector2[] uvs;
private Vector3[] normals;
private int unCheck = 0;
[MenuItem("GameTools/InfinityGround/GenerateMeshNormalTexture")]
static void execute()
{
EditorMeshNormalTextureGenerator win = (EditorMeshNormalTextureGenerator)EditorWindow.GetWindow(typeof(EditorMeshNormalTextureGenerator), false, "GenerateMeshNormalTexture");
win.Show();
}
private void OnGUI()
{
EditorGUILayout.LabelField("选择网格模型物体");
selectTrans = Selection.activeTransform;
if (selectTrans != null)
{
EditorGUILayout.ObjectField(new GUIContent("模型:"), selectTrans, typeof(Transform), false);
}
EditorGUILayout.LabelField("输入Texture宽度");
texWidString = EditorGUILayout.TextField("int类型:", texWidString);
EditorGUILayout.LabelField("输入Texture高度");
texHeiString = EditorGUILayout.TextField("int类型:", texHeiString);
EditorGUILayout.LabelField("输入Texture名称");
texName = EditorGUILayout.TextField("string类型:", texName);
if (GUILayout.Button("生成法线贴图"))
{
if (selectTrans == null)
{
this.ShowNotification(new GUIContent("请选择网格模型物体"));
return;
}
if (!int.TryParse(texWidString, out texWidth)
|| !int.TryParse(texHeiString, out texHeight))
{
this.ShowNotification(new GUIContent("请输入int类型长宽,float类型缩放"));
return;
}
meshFilter = selectTrans.GetComponent<MeshFilter>();
mesh = meshFilter.sharedMesh;
GetMeshParams();
Generate();
}
}
/// <summary>
/// Fetch the mesh data (triangles, uvs, normals)
/// </summary>
private void GetMeshParams()
{
triangles = mesh.triangles;
uvs = mesh.uv;
normals = mesh.normals;
}
#region ///pixel-to-UV relation checks
/// <summary>
/// Check whether pxuv coincides with vertex uva
/// </summary>
/// <param name="pxuv"></param>
/// <param name="uva"></param>
/// <returns></returns>
private bool CheckPixelInDot(Vector2 pxuv, Vector2 uva)
{
if (Mathf.Approximately(pxuv.x, uva.x)
&& Mathf.Approximately(pxuv.y, uva.y))
{
return true;
}
return false;
}
/// <summary>
/// Check whether pxuv lies on the line through uva and uvb
/// </summary>
/// <param name="pxuv"></param>
/// <param name="uva"></param>
/// <param name="uvb"></param>
/// <returns></returns>
private bool CheckPixelInLine(Vector2 pxuv, Vector2 uva, Vector2 uvb)
{
float k = (uvb.y - uva.y) / (uvb.x - uva.x);
float d = uva.y - k * uva.x;
float y1 = pxuv.y;
float y2 = k * pxuv.x + d;
if (Mathf.Approximately(y1, y2))
{
return true;
}
return false;
}
/// <summary>
/// Check whether pxuv lies inside triangle uva-uvb-uvc (angle-sum test: the three angles around an interior point add up to 360 degrees)
/// </summary>
/// <param name="pxuv"></param>
/// <param name="uva"></param>
/// <param name="uvb"></param>
/// <param name="uvc"></param>
/// <returns></returns>
private bool CheckPixelInTriangle(Vector2 pxuv, Vector2 uva, Vector2 uvb, Vector2 uvc)
{
Vector2 pa = uva - pxuv;
Vector2 pb = uvb - pxuv;
Vector2 pc = uvc - pxuv;
float angleapb = Vector2.Angle(pb, pa);
float anglebpc = Vector2.Angle(pc, pb);
float anglecpa = Vector2.Angle(pa, pc);
float angle = angleapb + anglebpc + anglecpa;
if (Mathf.Approximately(angle, 360f))
{
return true;
}
return false;
}
/// <summary>
/// Check whether pxuv lies inside the axis-aligned bounding rectangle of uva, uvb, uvc
/// </summary>
/// <param name="pxuv"></param>
/// <param name="uva"></param>
/// <param name="uvb"></param>
/// <param name="uvc"></param>
/// <returns></returns>
private bool CheckPixelInOutRect(Vector2 pxuv, Vector2 uva, Vector2 uvb, Vector2 uvc)
{
float[] xarr = new float[] { uva.x, uvb.x, uvc.x };
float[] yarr = new float[] { uva.y, uvb.y, uvc.y };
float xmin = GetFloatArrMin(xarr);
float xmax = GetFloatArrMax(xarr);
float ymin = GetFloatArrMin(yarr);
float ymax = GetFloatArrMax(yarr);
if (pxuv.x > xmax
|| pxuv.x < xmin
|| pxuv.y > ymax
|| pxuv.y < ymin)
{
return false;
}
return true;
}
private float GetFloatArrMin(float[] arr)
{
float min = float.MaxValue;
for (int i = 0; i < arr.Length; i++)
{
if (min > arr[i])
min = arr[i];
}
return min;
}
private float GetFloatArrMax(float[] arr)
{
float max = float.MinValue;
for (int i = 0; i < arr.Length; i++)
{
if (max < arr[i])
max = arr[i];
}
return max;
}
#endregion
#region ///per-pixel normal calculation
private Vector3 GetPixelNormal(Vector2Int px)
{
Vector2 pxuv = new Vector2((float)px.x / (float)texWidth, (float)px.y / (float)texHeight);
Vector3 pxnorm = new Vector3(0, 0, 1);
bool check = false;
for (int i = 0; i < triangles.Length; i += 3)
{
int ta = triangles[i];
int tb = triangles[i + 1];
int tc = triangles[i + 2];
Vector2 uva = uvs[ta];
Vector2 uvb = uvs[tb];
Vector2 uvc = uvs[tc];
Vector3 norma = normals[ta];
Vector3 normb = normals[tb];
Vector3 normc = normals[tc];
//first, check against the bounding rectangle of a, b, c
if (CheckPixelInOutRect(pxuv, uva, uvb, uvc))
{
//then check whether the pixel coincides with one of the vertices a, b, c
if (CheckPixelInDot(pxuv, uva))
{
pxnorm = norma;
check = true;
break;
}
if (CheckPixelInDot(pxuv, uvb))
{
pxnorm = normb;
check = true;
break;
}
if (CheckPixelInDot(pxuv, uvc))
{
pxnorm = normc;
check = true;
break;
}
//next, check whether it lies on one of the edges ab, bc, ca and lerp along that edge
if (CheckPixelInLine(pxuv, uva, uvb))
{
float k = Vector2.Distance(uva, pxuv) / Vector2.Distance(uva, uvb);
pxnorm = norma + (normb - norma) * k;
check = true;
break;
}
if (CheckPixelInLine(pxuv, uvb, uvc))
{
float k = Vector2.Distance(uvb, pxuv) / Vector2.Distance(uvb, uvc);
pxnorm = normb + (normc - normb) * k;
check = true;
break;
}
if (CheckPixelInLine(pxuv, uvc, uva))
{
float k = Vector2.Distance(uvc, pxuv) / Vector2.Distance(uvc, uva);
pxnorm = normc + (norma - normc) * k;
check = true;
break;
}
//finally, check whether it lies inside triangle abc: intersect line(b,p) with line(a,c) to get p1, lerp a->c for p1's normal, then lerp p1->b
if (CheckPixelInTriangle(pxuv, uva, uvb, uvc))
{
float xa = uva.x, ya = uva.y, xb = uvb.x, yb = uvb.y, xc = uvc.x, yc = uvc.y, xp = pxuv.x, yp = pxuv.y;
float k1 = (yp - yb) / (xp - xb);
float d1 = yb - k1 * xb;
float k2 = (yc - ya) / (xc - xa);
float d2 = ya - k2 * xa;
float p1x = (d2 - d1) / (k1 - k2);
float p1y = k1 * p1x + d1;
Vector2 p1uv = new Vector2(p1x, p1y);
float x1 = Vector2.Distance(uva, p1uv);
float y1 = Vector2.Distance(p1uv, uvc);
Vector3 p1norm = norma + (normc - norma) * x1 / (x1 + y1);
float x2 = Vector2.Distance(p1uv, pxuv);
float y2 = Vector2.Distance(pxuv, uvb);
pxnorm = p1norm + (normb - p1norm) * x2 / (x2 + y2);
check = true;
break;
}
}
}
if (!check)
{
unCheck++;
#if UNITY_EDITOR
Debug.LogErrorFormat("EditorMeshNormalTextureGenerator GetPixelNormal Error px = {0} pxuv = {1} uncheck = {2}", px, pxuv, unCheck);
#endif
}
return pxnorm;
}
#endregion
/// <summary>
/// Convert a normal vector to a color
/// </summary>
/// <param name="vec"></param>
/// <returns></returns>
private Color NormalVectorToColor(Vector3 vec)
{
float r = PackNormal(vec.x);
float g = PackNormal(vec.y);
float b = PackNormal(vec.z);
return new Color(r, g, b, 1);
}
/// <summary>
/// Remap r from [-1,1]
/// to [0,1]
/// </summary>
/// <param name="r"></param>
/// <returns></returns>
private float PackNormal(float r)
{
float g = (r + 1f) / 2f;
return g;
}
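//Note: PackNormal maps n in [-1,1] to c = (n + 1) / 2 per channel; the shader further down reads the
//texture with UnpackNormal, which effectively inverts this (c * 2 - 1), so the two must stay paired.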
private void Generate()
{
unCheck = 0;
pix = new Color[texWidth * texHeight];
for (int y = 0; y < texHeight; y++)
{
for (int x = 0; x < texWidth; x++)
{
Vector2Int px = new Vector2Int(x, y);
Vector3 norm = GetPixelNormal(px);
pix[y * texWidth + x] = NormalVectorToColor(norm);
}
}
nmTex = new Texture2D(texWidth, texHeight);
nmTex.SetPixels(pix);
nmTex.Apply();
byte[] buffer = nmTex.EncodeToJPG();
string filepath = Application.dataPath + "/InfinityGround/Texture/" + texName + ".jpg";
File.WriteAllBytes(filepath, buffer);
AssetDatabase.Refresh();
}
}
The core of it is working out which UV triangle each pixel Pn belongs to and then interpolating the normals with the corresponding weights. Let's test it:
Quite a few problems show up here. First, the generated normal map is a riot of color. That's easy to explain: we covered normal mapping before and know the difference between tangent-space and model-space normals; the normals currently written into the map are in model space and point in every direction, so the colors go all over the place.
Second, never mind that the map is noisy, it also keeps throwing up patches of "purple gaps", which would mean those pixels match none of the UV triangles. Time for some debug output, so I extend Generate() to also write a black-and-white check texture (new nmPix/ckPix/ckTex fields, and GetPixelNormal now takes an out bool that reports whether the pixel matched any triangle):
private void Generate()
{
unCheck = 0;
nmPix = new Color[texWidth * texHeight];
ckPix = new Color[texWidth * texHeight];
for (int y = 0; y < texHeight; y++)
{
for (int x = 0; x < texWidth; x++)
{
int index = y * texWidth + x;
Vector2Int px = new Vector2Int(x, y);
bool check;
Vector3 norm = GetPixelNormal(px, out check);
nmPix[index] = NormalVectorToColor(norm);
ckPix[index] = check ? Color.white : Color.black;
}
}
nmTex = new Texture2D(texWidth, texHeight);
nmTex.SetPixels(nmPix);
nmTex.Apply();
byte[] buffer = nmTex.EncodeToJPG();
string filepath = Application.dataPath + "/InfinityGround/Texture/" + texName + ".jpg";
File.WriteAllBytes(filepath, buffer);
ckTex = new Texture2D(texWidth, texHeight);
ckTex.SetPixels(ckPix);
ckTex.Apply();
buffer = ckTex.EncodeToJPG();
filepath = Application.dataPath + "/InfinityGround/Texture/" + texName + "_check.jpg";
File.WriteAllBytes(filepath, buffer);
AssetDatabase.Refresh();
}
After generating, I found the following:
The black-and-white check texture shows that, apart from the noise, there are no outright detection failures. So let's tackle the noise first. My idea is to loosen the comparison precision: floating-point comparisons always carry precision error, and if the tolerance is too tight, the "approximately equal" checks fail too often. So let's try lowering the precision of the checks, as follows:
private float floatTole;
#region ///float comparison with tolerance
private bool FloatApproximate(float a, float b)
{
//relative tolerance: scales with |a|, controlled by floatTole
float tole = Mathf.Abs(a * floatTole);
if (a > b + tole || a < b - tole)
{
return false;
}
return true;
}
private bool Vector2Approximate(Vector2 a, Vector2 b)
{
float ax = a.x, ay = a.y, bx = b.x, by = b.y;
if (!FloatApproximate(ax, bx) || !FloatApproximate(ay, by))
{
return false;
}
return true;
}
private bool Vector3Approximate(Vector3 a, Vector3 b)
{
float ax = a.x, ay = a.y, bx = b.x, by = b.y, az = a.z, bz = b.z;
if (!FloatApproximate(ax, bx) || !FloatApproximate(ay, by) || !FloatApproximate(az, bz))
{
return false;
}
return true;
}
#endregion
We wrap the float comparisons this way and then generate with a series of different tolerances:
You can see that the smaller the tolerance, the smoother the normal interpolation but the more noise; the larger the tolerance, the coarser the interpolation but the noise disappears. The relative tolerance also happens to fix the gap problem.
For the terrain chunk there's nothing for it but to test tolerance values one by one; I generated several normal maps with different tolerances:
Personally I think a tolerance of 0.00001 looks best.
Next we have to transform the normals into tangent space. We've discussed normal mapping before and know the TBN (Tangent, Bitangent, Normal) matrix, which handles the model-space to tangent-space transform, as follows:
Unity also provides a reference TBN construction, the TANGENT_SPACE_ROTATION macro:
#define TANGENT_SPACE_ROTATION \
float3 binormal = cross( normalize(v.normal), normalize(v.tangent.xyz) ) * v.tangent.w; \
float3x3 rotation = float3x3( v.tangent.xyz, binormal, v.normal )
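In other words (just restating the macro: tangent, binormal and normal form the rows of the rotation), a model-space vector $v$ goes into tangent space as

$$v_{tangent} = \begin{pmatrix} T \\ B \\ N \end{pmatrix} v = (T\cdot v,\ B\cdot v,\ N\cdot v)$$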
This means that if we naively transform the normal into tangent space, the normal itself is the N of its own TBN, so every tangent-space normal comes out as (0,0,1) and the map is a uniform purple-blue, like this:
Which brings us to a concept worth spelling out: baking normal maps.
Normally we build a high-precision model in 3ds Max or Blender. Such a model can't be used in the engine as-is; artists reduce it to a polygon count that an ordinary PC or phone can render smoothly, say from a one-million-triangle high-poly A down to a ten-thousand-triangle low-poly B. But then B no longer carries A's lighting detail. If a texture records all of A's surface detail relative to B, we can reproduce A's lighting in B's shading.
Note the key words: "relative to". A normal map is exactly the high-poly A's normals expressed relative to the low-poly B's tangent space, as shown below:
Taking the terrain chunk as the example: only after transforming the high-poly A's (the noise-displaced grid's) normal N1 by the low-poly B's (the flat grid's) TBN matrix do we get A's normal relative to B's tangent space.
Here's the code:
//Matrix3x3 is presumably a small custom helper type (Unity itself only ships Matrix4x4); it holds the flat grid's TBN basis
private Matrix3x3 tbnMat = new Matrix3x3(new Vector3(0, 0, 1), new Vector3(1, 0, 0), new Vector3(0, 1, 0));
pxnorm = tbnMat * pxnorm.normalized;
return pxnorm.normalized;
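If you don't want a custom Matrix3x3, the same transform can be sketched with the built-in Matrix4x4. This is only a sketch and assumes the flat grid's basis is tangent = (1,0,0), bitangent = (0,0,1), normal = (0,1,0), laid out as rows like TANGENT_SPACE_ROTATION above; adjust it to whatever basis your mesh actually carries:
//Model-space -> tangent-space rotation for the flat grid (rows of the 3x3 part are T, B, N).
//Matrix4x4's constructor takes columns, hence the transposed-looking initializers.
private static readonly Matrix4x4 flatTbn = new Matrix4x4(
    new Vector4(1, 0, 0, 0),   //column 0
    new Vector4(0, 0, 1, 0),   //column 1
    new Vector4(0, 1, 0, 0),   //column 2
    new Vector4(0, 0, 0, 1));  //column 3
private static Vector3 ModelToTangent(Vector3 n)
{
    //MultiplyVector applies only the rotation part, which is all we need for a direction
    return flatTbn.MultiplyVector(n.normalized).normalized;
}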
The final generated normal map looks like this:
Next we render with this tangent-space normal map:
Shader "InfinityGround/GroundChunkTesselSurfaceShader"
{
Properties
{
_Color ("Color", Color) = (1,1,1,1)
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_Glossiness ("Smoothness", Range(0,1)) = 0.5
_Metallic ("Metallic", Range(0,1)) = 0.0
_TesselMin("Tessellation Min Distance",Range(0,200)) = 1
_TesselMax("Tessellation Max Distance",Range(0,400)) = 1
_TesselFactor("Tessellation Factor",Range(1,20)) = 5
_NoiseTex("Noise Texture",2D) = "white" {}
_HeightInten("Height Intensity",Range(0,100)) = 10
_NormalTex("Normal Texture",2D) = "white" {}
[Toggle]_IsNorm("Is Apply Normal",int) = 0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Standard fullforwardshadows vertex:vert tessellate:tess
#pragma target 5.0
#include "Tessellation.cginc"
sampler2D _MainTex;
struct Input
{
float2 uv_MainTex;
};
half _Glossiness;
half _Metallic;
fixed4 _Color;
float _TesselMin;
float _TesselMax;
int _TesselFactor;
sampler2D _NoiseTex;
float _HeightInten;
sampler2D _NormalTex;
int _IsNorm;
//distance-based tessellation factor per patch
float4 tess(appdata_tan v0,appdata_tan v1,appdata_tan v2)
{
float4 v = UnityDistanceBasedTess(v0.vertex,v1.vertex, v2.vertex,_TesselMin,_TesselMax,_TesselFactor);
return v;
}
//displace each vertex along its normal by the sampled noise height (tex2Dlod: no derivatives in the vertex stage)
void vert(inout appdata_tan v)
{
float3 normal = UnityObjectToWorldNormal(v.normal);
float r = tex2Dlod(_NoiseTex,v.texcoord).r;
v.vertex+=float4(normal*_HeightInten*r,0);
}
UNITY_INSTANCING_BUFFER_START(Props)
UNITY_INSTANCING_BUFFER_END(Props)
void surf (Input IN, inout SurfaceOutputStandard o)
{
fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = c.rgb;
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
if(_IsNorm==1)
{
//apply the baked tangent-space normal map
o.Normal = UnpackNormal(tex2D(_NormalTex, IN.uv_MainTex));
}
}
ENDCG
}
FallBack "Diffuse"
}
The result:
And with that we have the final tessellated terrain with normal-mapped lighting.
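For reference, the generated textures and the toggle can also be assigned from script through the standard Material API (a sketch only; the property names match the shader above, the component and values are just an example):
using UnityEngine;
//Hypothetical setup helper: pushes the noise and baked normal textures into the chunk's material.
public class GroundChunkMaterialSetup : MonoBehaviour
{
    public Texture2D noiseTex;
    public Texture2D normalTex;
    void Start()
    {
        Material mat = GetComponent<MeshRenderer>().material;
        mat.SetTexture("_NoiseTex", noiseTex);   //height map sampled in vert
        mat.SetTexture("_NormalTex", normalTex); //baked tangent-space normal map
        mat.SetFloat("_HeightInten", 10f);       //must match the height used when baking the normals
        mat.SetInt("_IsNorm", 1);                //enable the normal map branch in surf
    }
}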
Of course there are two more finishing touches:
1. The normal map's linear interpolation isn't smooth enough and its resolution is too low. We can fix this by increasing the vertex count of the high-poly mesh (the noise-displaced grid) and raising the output resolution, though generation then takes long enough to go eat a meal in the meantime.
2. Is there a general-purpose way to generate tangent-space normal maps? It's actually simple: take any high-poly A and its low-poly B along with their mesh data (uv, normals, tangents, vertex positions). Using the same UV-triangle lookup and linear interpolation as above, every pixel of the normal map gets the high-poly A's model-space normal and the low-poly B's per-vertex tangent-space TBN matrix, from which the final tangent-space normal is computed.
If there's a need and I find the time, I'll package this into a Unity plugin and accelerate it with multithreading and a ComputeShader.
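As a sketch of the multithreading idea (hypothetical; it only works because GetPixelNormal reads plain C# arrays and never touches UnityEngine objects, while SetPixels/Apply/EncodeToJPG must still run on the main thread afterwards):
//would live inside EditorMeshNormalTextureGenerator
private Color[] GeneratePixelsParallel()
{
    Color[] pixels = new Color[texWidth * texHeight];
    //one parallel task per row; the unCheck counter would need Interlocked.Increment to stay exact
    System.Threading.Tasks.Parallel.For(0, texHeight, y =>
    {
        for (int x = 0; x < texWidth; x++)
        {
            Vector3 norm = GetPixelNormal(new Vector2Int(x, y), out bool check); //check ignored here for brevity
            pixels[y * texWidth + x] = NormalVectorToColor(norm);
        }
    });
    return pixels;
}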