This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision Next revision Both sides next revision | ||
ibl_sample_shader_lys [2017/04/29 12:12] adavies |
ibl_sample_shader_lys [2017/05/16 02:53] adavies [Downloading and running the shader in FX Composer] |
||
---|---|---|---|
Line 1: | Line 1: | ||
====== Image Based Lighting Sample Shader ====== | ====== Image Based Lighting Sample Shader ====== | ||
- | Throughout the following page we are providing a free IBL sample shader, the associated source files for the shader and a general user guide so that you may test the various aspects of IBL with a minimal setup and iteration time. The shader specifically covers cube maps that have been generated within Lys, though it may still be a very useful general reference in other cases. | + | Throughout the following page we are providing a free IBL sample shader and the associated source files. |
+ | |||
+ | The shader specifically covers cube maps that have been generated within Lys using the Burley option for image based lighting but may also serve as a useful reference in general. | ||
Line 12: | Line 14: | ||
- | - Download the shader and asset files from HERE. | + | - Download the shader and asset files from [[https://s3-us-west-2.amazonaws.com/knalduswest/docs/freeLysIblSample.zip|HERE]]. |
- | - Download FX Composer from HERE & install. | + | - Download FX Composer from [[http://developer.download.nvidia.com/tools/FX_Composer/2.5/FX_Composer2_2.51.0701.1135.exe|HERE]] & install. |
- Extract the .zip file to a location of your choosing. | - Extract the .zip file to a location of your choosing. | ||
- Double click the freeLysIblSample.fxcproj file found in the extracted folder. | - Double click the freeLysIblSample.fxcproj file found in the extracted folder. | ||
Line 21: | Line 23: | ||
- Enjoy! | - Enjoy! | ||
- | Below is the full shader. You can download the sample shader and assets HERE | + | Below is the full shader. You can download the sample shader and assets [[https://s3-us-west-2.amazonaws.com/knalduswest/docs/freeLysIblSample.zip|HERE]] |
<code glsl> | <code glsl> | ||
/* | /* | ||
- | % Free IBL sample using cube map exported from Knaldtech's tool Lys | + | % Copyright 2017 Knald Technologies, LLC |
+ | % See LICENSE.txt for licensing and redistribution terms. | ||
+ | % Free IBL sample using cube map exported from Knaldtech's tool Lys. https://www.knaldtech.com/lys/ | ||
% The cube map was made with offset set to 3 and exported as GGX with Burley roughness drop. | % The cube map was made with offset set to 3 and exported as GGX with Burley roughness drop. | ||
+ | % Despite the difference in distribution of MIPs the lit specular response resulting from the "roughness texture" | ||
+ | % will be identical to existing PBR based game engines and tools. | ||
*/ | */ | ||
Line 56: | Line 62: | ||
TextureCube lysBurleyCube < | TextureCube lysBurleyCube < | ||
- | string UIName = "IBL. cube"; | + | string UIName = "IBL. cube"; |
- | string ResourceType = "cube"; | + | string ResourceType = "cube"; |
>; | >; | ||
Texture2D albedo_tex < | Texture2D albedo_tex < | ||
- | string UIName = "albedo Texture"; | + | string UIName = "albedo Texture"; |
- | string ResourceType = "2D"; | + | string ResourceType = "2D"; |
>; | >; | ||
Texture2D smoothness_tex < | Texture2D smoothness_tex < | ||
- | string UIName = "smoothness Texture"; | + | string UIName = "smoothness Texture"; |
- | string ResourceType = "2D"; | + | string ResourceType = "2D"; |
>; | >; | ||
Texture2D metalness_tex < | Texture2D metalness_tex < | ||
- | string UIName = "metalness Texture"; | + | string UIName = "metalness Texture"; |
- | string ResourceType = "2D"; | + | string ResourceType = "2D"; |
>; | >; | ||
Texture2D normal_tex < | Texture2D normal_tex < | ||
- | string UIName = "normal Texture"; | + | string UIName = "normal Texture"; |
- | string ResourceType = "2D"; | + | string ResourceType = "2D"; |
>; | >; | ||
Texture2D ao_tex < | Texture2D ao_tex < | ||
- | string UIName = "ao Texture"; | + | string UIName = "ao Texture"; |
- | string ResourceType = "2D"; | + | string ResourceType = "2D"; |
>; | >; | ||
Line 91: | Line 97: | ||
float4 position : POSITION; | float4 position : POSITION; | ||
float3 normal : NORMAL; | float3 normal : NORMAL; | ||
- | float3 tang : TANGENT; | + | float3 tang : TANGENT; |
- | float3 bino : BINORMAL; | + | float3 bino : BINORMAL; |
float2 texcoord : TEXCOORD0; | float2 texcoord : TEXCOORD0; | ||
}; | }; | ||
Line 103: | Line 109: | ||
float3 normal : TEXCOORD1; | float3 normal : TEXCOORD1; | ||
float2 stcoord : TEXCOORD2; | float2 stcoord : TEXCOORD2; | ||
- | float3 tang : TEXCOORD3; | + | float3 tang : TEXCOORD3; |
- | float3 bino : TEXCOORD4; | + | float3 bino : TEXCOORD4; |
}; | }; | ||
Line 117: | Line 123: | ||
float ApproximateSpecularSelfOcclusion(float3 vR, float3 vertNormalNormalized); | float ApproximateSpecularSelfOcclusion(float3 vR, float3 vertNormalNormalized); | ||
+ | // Note that our implementation of BurleyToMip() below differs from the more typical | ||
+ | // form as cube maps convolved in Lys are based on RdotL and not NdotH. You can find | ||
+ | // a more detailed description in "Pre-convolved Cube Maps vs Path Tracers" | ||
+ | // Despite the difference in distribution of MIPs the lit specular response resulting from | ||
+ | // the "roughness texture" will be identical to existing PBR based game engines and tools. | ||
float BurleyToMip(float fPerceptualRoughness, int nMips, float NdotR) | float BurleyToMip(float fPerceptualRoughness, int nMips, float NdotR) | ||
{ | { | ||
- | float fSpecPower = SpecularPowerFromPerceptualRoughness(fPerceptualRoughness); | + | float fSpecPower = SpecularPowerFromPerceptualRoughness(fPerceptualRoughness); |
- | fSpecPower /= (4*max(NdotR, FLT_EPSILON)); // see section "Pre-convolved Cube Maps vs Path Tracers" | + | fSpecPower /= (4*max(NdotR, FLT_EPSILON)); // see section "Pre-convolved Cube Maps vs Path Tracers" |
- | float fScale = PerceptualRoughnessFromSpecularPower(fSpecPower); | + | float fScale = PerceptualRoughnessFromSpecularPower(fSpecPower); |
- | return fScale*(nMips-1-nMipOffset); | + | return fScale*(nMips-1-nMipOffset); |
} | } | ||
float BurleyToMipSimple(float fPerceptualRoughness, int nMips) | float BurleyToMipSimple(float fPerceptualRoughness, int nMips) | ||
{ | { | ||
- | float fScale = fPerceptualRoughness*(1.7 - 0.7*fPerceptualRoughness); // approximate remap from LdotR based distribution to NdotH | + | float fScale = fPerceptualRoughness*(1.7 - 0.7*fPerceptualRoughness); // approximate remap from LdotR based distribution to NdotH |
- | return fScale*(nMips-1-nMipOffset); | + | return fScale*(nMips-1-nMipOffset); |
} | } | ||
int GetNumMips(TextureCube cubeTex) | int GetNumMips(TextureCube cubeTex) | ||
{ | { | ||
- | int iWidth=0, iHeight=0, numMips=0; | + | int iWidth=0, iHeight=0, numMips=0; |
- | cubeTex.GetDimensions(0, iWidth, iHeight, numMips); | + | cubeTex.GetDimensions(0, iWidth, iHeight, numMips); |
- | return numMips; | + | return numMips; |
} | } | ||
Line 143: | Line 153: | ||
float3 GammaToLinear( float3 color) | float3 GammaToLinear( float3 color) | ||
{ | { | ||
- | return pow(color,2.2); | + | return pow(color,2.2); |
} | } | ||
float3 LinearToGamma( float3 linearColor) | float3 LinearToGamma( float3 linearColor) | ||
{ | { | ||
- | return pow(linearColor,1.0/2.2); | + | return pow(linearColor,1.0/2.2); |
} | } | ||
Line 155: | Line 165: | ||
vertexOutput main_VS(vertInput IN) | vertexOutput main_VS(vertInput IN) | ||
{ | { | ||
- | vertexOutput res; | + | vertexOutput res; |
- | + | ||
- | res.stcoord = float2(IN.texcoord.x, 1.0-IN.texcoord.y); | + | res.stcoord = float2(IN.texcoord.x, 1.0-IN.texcoord.y); |
- | + | ||
- | // transform attributes to world space | + | // transform attributes to world space |
- | res.pos = mul(float4(IN.position.xyz,1), g_mObjToWorld).xyz; | + | res.pos = mul(float4(IN.position.xyz,1), g_mObjToWorld).xyz; |
- | res.tang = normalize( mul(float4(IN.tang, 0), g_mObjToWorld ).xyz ); | + | res.tang = normalize( mul(float4(IN.tang, 0), g_mObjToWorld ).xyz ); |
- | res.bino = -normalize( mul(float4(IN.bino, 0), g_mObjToWorld ).xyz ); // bitangent negated in fxcomposer | + | res.bino = -normalize( mul(float4(IN.bino, 0), g_mObjToWorld ).xyz ); // bitangent negated in fxcomposer |
- | + | ||
- | // normals are transformed using inverse transposed so this gives us the normal in world space. | + | // normals are transformed using inverse transposed so this gives us the normal in world space. |
res.normal = normalize( mul(float4(IN.normal.xyz,0), g_mWorldToObjTransposed).xyz ); | res.normal = normalize( mul(float4(IN.normal.xyz,0), g_mWorldToObjTransposed).xyz ); | ||
- | + | | |
- | // used by rasterizer | + | // used by rasterizer |
- | res.position = mul(float4(IN.position.xyz, 1.0), g_mObjToViewProj); | + | res.position = mul(float4(IN.position.xyz, 1.0), g_mObjToViewProj); |
- | + | ||
- | return res; | + | return res; |
} | } | ||
Line 176: | Line 186: | ||
float4 main_FP(vertexOutput IN) : COLOR | float4 main_FP(vertexOutput IN) : COLOR | ||
{ | { | ||
- | // gather inputs | + | // gather inputs |
- | float3 vN = IN.normal; | + | float3 vN = IN.normal; |
- | float3 vT = IN.tang; | + | float3 vT = IN.tang; |
- | float3 vB = IN.bino; | + | float3 vB = IN.bino; |
- | float3 vN_unit = normalize(vN); | + | float3 vN_unit = normalize(vN); |
- | float3 pos = IN.pos; | + | float3 pos = IN.pos; |
- | float2 st = IN.stcoord.xy; | + | float2 st = IN.stcoord.xy; |
- | + | ||
- | // material properties from texture | + | // material properties from texture |
- | float smoothness = smoothness_tex.Sample(samLinear, st).x; // not gamma corrected | + | float smoothness = smoothness_tex.Sample(samLinear, st).x; // not gamma corrected |
- | float perceptualRoughness = 1.0 - smoothness; | + | float perceptualRoughness = 1.0 - smoothness; |
- | float metalness = metalness_tex.Sample(samLinear, st).x; // not gamma corrected | + | float metalness = metalness_tex.Sample(samLinear, st).x; // not gamma corrected |
- | float3 texNormal = 2*normal_tex.Sample(samLinear, st).xyz - 1.0; // not gamma corrected | + | float3 texNormal = 2*normal_tex.Sample(samLinear, st).xyz - 1.0; // not gamma corrected |
- | float3 albedo = GammaToLinear( albedo_tex.Sample(samLinear, st).xyz ); | + | float3 albedo = GammaToLinear( albedo_tex.Sample(samLinear, st).xyz ); |
- | float ao = ao_tex.Sample(samLinear, st).x; // not gamma corrected | + | float ao = ao_tex.Sample(samLinear, st).x; // not gamma corrected |
- | + | ||
- | // get camera position and direction in world space | + | // get camera position and direction in world space |
- | float3 eyePos = float3(g_mViewToWorld[3].x,g_mViewToWorld[3].y,g_mViewToWorld[3].z); | + | float3 eyePos = float3(g_mViewToWorld[3].x,g_mViewToWorld[3].y,g_mViewToWorld[3].z); |
- | float3 to_cam = normalize(eyePos - pos); // to view vector | + | float3 to_cam = normalize(eyePos - pos); // to view vector |
- | + | ||
- | // normal mapping | + | // normal mapping |
- | //vN = normalize(vT*texNormal.x + vB*texNormal.y + vN*texNormal.z); // tangent space normal map | + | //vN = normalize(vT*texNormal.x + vB*texNormal.y + vN*texNormal.z); // tangent space normal map |
- | vN = normalize( mul(float4(texNormal.xyz,0), g_mWorldToObjTransposed).xyz ); // object space normal map | + | vN = normalize( mul(float4(texNormal.xyz,0), g_mWorldToObjTransposed).xyz ); // object space normal map |
- | //vN = vN_unit; // normal mapping disabled (use interpolated vertex normal) | + | //vN = vN_unit; // normal mapping disabled (use interpolated vertex normal) |
- | + | ||
- | // evaluate ibl based brdf | + | // evaluate ibl based brdf |
- | float3 outRadiance = EvalBRDF(lysBurleyCube, vN, vN_unit, to_cam, perceptualRoughness, metalness, albedo, ao); | + | float3 outRadiance = EvalBRDF(lysBurleyCube, vN, vN_unit, to_cam, perceptualRoughness, metalness, albedo, ao); |
- | // sRGB not built into fx composer. Must do by hand. | + | // sRGB not built into fx composer. Must do by hand. |
- | // don't do this in your own engine. | + | // don't do this in your own engine. |
- | return float4(LinearToGamma(outRadiance), 1.0); | + | return float4(LinearToGamma(outRadiance), 1.0); |
} | } | ||
float3 EvalBRDF(TextureCube lysBurleyCube, float3 vN, float3 org_normal, float3 to_cam, float perceptualRoughness, float metalness, float3 albedo, float ao) | float3 EvalBRDF(TextureCube lysBurleyCube, float3 vN, float3 org_normal, float3 to_cam, float perceptualRoughness, float metalness, float3 albedo, float ao) | ||
{ | { | ||
- | int numMips = GetNumMips(lysBurleyCube); | + | int numMips = GetNumMips(lysBurleyCube); |
- | const int nrBrdfMips = numMips-nMipOffset; | + | const int nrBrdfMips = numMips-nMipOffset; |
- | float VdotN = clamp(dot(to_cam, vN), 0.0, 1.0f); // same as NdotR | + | float VdotN = clamp(dot(to_cam, vN), 0.0, 1.0f); // same as NdotR |
- | const float3 vRorg = 2*vN*VdotN-to_cam; | + | const float3 vRorg = 2*vN*VdotN-to_cam; |
- | + | ||
- | float3 vR = GetSpecularDominantDir(vN, vRorg, RoughnessFromPerceptualRoughness(perceptualRoughness)); | + | float3 vR = GetSpecularDominantDir(vN, vRorg, RoughnessFromPerceptualRoughness(perceptualRoughness)); |
- | float RdotNsat = saturate(dot(vN, vR)); | + | float RdotNsat = saturate(dot(vN, vR)); |
- | + | ||
- | #if 1 | + | #if 1 |
- | float l = BurleyToMip(perceptualRoughness, numMips, RdotNsat); | + | float l = BurleyToMip(perceptualRoughness, numMips, RdotNsat); |
#else | #else | ||
- | float l = BurleyToMipSimple(perceptualRoughness, numMips); | + | float l = BurleyToMipSimple(perceptualRoughness, numMips); |
#endif | #endif | ||
- | // fxcomposer uses a right hand coordinate frame (unlike d3d which uses left) | + | // fxcomposer uses a right hand coordinate frame (unlike d3d which uses left) |
- | // and has Y axis up. We've exported accordingly in Lys. For conventional | + | // and has Y axis up. We've exported accordingly in Lys. For conventional |
- | // d3d11 just set Y axis as up in Lys before export. | + | // d3d11 just set Y axis as up in Lys before export. |
- | float3 specRad = lysBurleyCube.SampleLevel(samLinear, vR, l).xyz; | + | float3 specRad = lysBurleyCube.SampleLevel(samLinear, vR, l).xyz; |
- | float3 diffRad = lysBurleyCube.SampleLevel(samLinear, vN, (float) (nrBrdfMips-1)).xyz; | + | float3 diffRad = lysBurleyCube.SampleLevel(samLinear, vN, (float) (nrBrdfMips-1)).xyz; |
- | + | ||
- | + | ||
- | float3 spccol = lerp( (float3) 0.04, albedo, metalness); | + | float3 spccol = lerp( (float3) 0.04, albedo, metalness); |
- | float3 dfcol = lerp( (float3) 0.0, albedo, 1-metalness); | + | float3 dfcol = lerp( (float3) 0.0, albedo, 1-metalness); |
- | + | ||
- | // fresnel | + | // fresnel |
- | float fT = 1.0-RdotNsat; | + | float fT = 1.0-RdotNsat; |
- | float fT5 = fT*fT; fT5 = fT5*fT5*fT; | + | float fT5 = fT*fT; fT5 = fT5*fT5*fT; |
- | spccol = lerp(spccol, (float3) 1.0, fT5); | + | spccol = lerp(spccol, (float3) 1.0, fT5); |
- | + | ||
- | // take reduction in brightness into account. | + | // take reduction in brightness into account. |
- | float fFade = GetReductionInMicrofacets(perceptualRoughness); | + | float fFade = GetReductionInMicrofacets(perceptualRoughness); |
- | fFade *= EmpiricalSpecularAO(ao, perceptualRoughness); | + | fFade *= EmpiricalSpecularAO(ao, perceptualRoughness); |
- | fFade *= ApproximateSpecularSelfOcclusion(vR, org_normal); | + | fFade *= ApproximateSpecularSelfOcclusion(vR, org_normal); |
- | + | ||
- | // final result | + | // final result |
- | return ao*dfcol*diffRad + fFade*spccol*specRad; | + | return ao*dfcol*diffRad + fFade*spccol*specRad; |
} | } | ||
float GetReductionInMicrofacets(float perceptualRoughness) | float GetReductionInMicrofacets(float perceptualRoughness) | ||
{ | { | ||
- | // this is not needed if you separately precompute an integrated FG term such as proposed | + | // this is not needed if you separately precompute an integrated FG term such as proposed |
- | // by epic. Alternatively this simple analytical approximation retains the energy | + | // by epic. Alternatively this simple analytical approximation retains the energy |
- | // loss associated with Integral GGX(NdotH)*NdotH * (NdotL>0) dH which | + | // loss associated with Integral GGX(NdotH)*NdotH * (NdotL>0) dH which |
- | // for GGX equals 1/(roughness^2+1) when integrated over the half sphere. | + | // for GGX equals 1/(roughness^2+1) when integrated over the half sphere. |
- | // without the NdotL>0 indicator term the integral equals one. | + | // without the NdotL>0 indicator term the integral equals one. |
- | float roughness = RoughnessFromPerceptualRoughness(perceptualRoughness); | + | float roughness = RoughnessFromPerceptualRoughness(perceptualRoughness); |
- | return 1.0 / (roughness*roughness+1.0); | + | return 1.0 / (roughness*roughness+1.0); |
} | } | ||
float EmpiricalSpecularAO(float ao, float perceptualRoughness) | float EmpiricalSpecularAO(float ao, float perceptualRoughness) | ||
{ | { | ||
- | // basically a ramp curve allowing ao on very diffuse specular | + | // basically a ramp curve allowing ao on very diffuse specular |
- | // and gradually less so as the reflection hardens. | + | // and gradually less so as the reflection hardens. |
- | float fSmooth = 1-perceptualRoughness; | + | float fSmooth = 1-perceptualRoughness; |
- | float fSpecAo = gain(ao,0.5+max(0.0,fSmooth*0.4)); | + | float fSpecAo = gain(ao,0.5+max(0.0,fSmooth*0.4)); |
- | + | ||
- | return min(1.0,fSpecAo + lerp(0.0, 0.5, fSmooth*fSmooth*fSmooth*fSmooth)); | + | return min(1.0,fSpecAo + lerp(0.0, 0.5, fSmooth*fSmooth*fSmooth*fSmooth)); |
} | } | ||
Line 276: | Line 286: | ||
float ApproximateSpecularSelfOcclusion(float3 vR, float3 vertNormalNormalized) | float ApproximateSpecularSelfOcclusion(float3 vR, float3 vertNormalNormalized) | ||
{ | { | ||
- | const float fFadeParam = 1.3; | + | const float fFadeParam = 1.3; |
- | float rimmask = clamp( 1 + fFadeParam * dot(vR, vertNormalNormalized), 0.0, 1.0); | + | float rimmask = clamp( 1 + fFadeParam * dot(vR, vertNormalNormalized), 0.0, 1.0); |
- | rimmask *= rimmask; | + | rimmask *= rimmask; |
- | + | ||
- | return rimmask; | + | return rimmask; |
} | } | ||
float RoughnessFromPerceptualRoughness(float fPerceptualRoughness) | float RoughnessFromPerceptualRoughness(float fPerceptualRoughness) | ||
{ | { | ||
- | return fPerceptualRoughness*fPerceptualRoughness; | + | return fPerceptualRoughness*fPerceptualRoughness; |
} | } | ||
float PerceptualRoughnessFromRoughness(float fRoughness) | float PerceptualRoughnessFromRoughness(float fRoughness) | ||
{ | { | ||
- | return sqrt(max(0.0,fRoughness)); | + | return sqrt(max(0.0,fRoughness)); |
} | } | ||
float SpecularPowerFromPerceptualRoughness(float fPerceptualRoughness) | float SpecularPowerFromPerceptualRoughness(float fPerceptualRoughness) | ||
{ | { | ||
- | float fRoughness = RoughnessFromPerceptualRoughness(fPerceptualRoughness); | + | float fRoughness = RoughnessFromPerceptualRoughness(fPerceptualRoughness); |
- | return (2.0/max(FLT_EPSILON, fRoughness*fRoughness))-2.0; | + | return (2.0/max(FLT_EPSILON, fRoughness*fRoughness))-2.0; |
} | } | ||
float PerceptualRoughnessFromSpecularPower(float fSpecPower) | float PerceptualRoughnessFromSpecularPower(float fSpecPower) | ||
{ | { | ||
- | float fRoughness = sqrt(2.0/(fSpecPower + 2.0)); | + | float fRoughness = sqrt(2.0/(fSpecPower + 2.0)); |
- | return PerceptualRoughnessFromRoughness(fRoughness); | + | return PerceptualRoughnessFromRoughness(fRoughness); |
} | } | ||
Line 308: | Line 318: | ||
float3 GetSpecularDominantDir(float3 vN, float3 vR, float fRealRoughness) | float3 GetSpecularDominantDir(float3 vN, float3 vR, float fRealRoughness) | ||
{ | { | ||
- | float fInvRealRough = saturate(1 - fRealRoughness); | + | float fInvRealRough = saturate(1 - fRealRoughness); |
- | float lerpFactor = fInvRealRough * (sqrt(fInvRealRough)+fRealRoughness); | + | float lerpFactor = fInvRealRough * (sqrt(fInvRealRough)+fRealRoughness); |
- | return lerp(vN, vR, lerpFactor); | + | return lerp(vN, vR, lerpFactor); |
} | } | ||
float bias(float value, float b) | float bias(float value, float b) | ||
{ | { | ||
- | return (b > 0.0) ? pow(value, log(b) / log(0.5)) : 0.0; | + | return (b > 0.0) ? pow(value, log(b) / log(0.5)) : 0.0; |
} | } | ||
Line 322: | Line 332: | ||
float gain(float value, float g) | float gain(float value, float g) | ||
{ | { | ||
- | return 0.5 * ((value < 0.5) ? bias(2.0*value, 1.0-g) : (2.0 - bias(2.0-2.0*value, 1.0-g))); | + | return 0.5 * ((value < 0.5) ? bias(2.0*value, 1.0-g) : (2.0 - bias(2.0-2.0*value, 1.0-g))); |
} | } | ||
Line 347: | Line 357: | ||
technique10 Render { | technique10 Render { | ||
- | pass p0 { | + | pass p0 { |
- | SetVertexShader( CompileShader( vs_4_0, main_VS() ) ); | + | SetVertexShader( CompileShader( vs_4_0, main_VS() ) ); |
SetPixelShader( CompileShader( ps_4_0, main_FP() ) ); | SetPixelShader( CompileShader( ps_4_0, main_FP() ) ); | ||
| | ||
Line 354: | Line 364: | ||
SetDepthStencilState( EnableDepth, 0 ); | SetDepthStencilState( EnableDepth, 0 ); | ||
SetRasterizerState(RasterizerSettings); | SetRasterizerState(RasterizerSettings); | ||
- | } | + | } |
} | } | ||
</code> | </code> |