Ocean Shader with Projected Grid

This water shader was presented in [1], and has the following characteristics:

  • Water represented as a 2D grid
  • Large visible water surface with a special LOD algorithm: the vertex grid is projected so that it always lies in front of the camera.
  • Wave motion is based on randomly generated Perlin noise

A detailed description of the shader (the full thesis of Claes Johanson) is available here:
Projected Grid Ocean Shader - Full - html version

Sample images

[Image: appe2.jpg]

The demo developed during the work on this thesis follows the approach suggested in Section 3.1.3. It was written with DirectX 9, targeting the ATi R300 class of GPUs (Radeon 9500 or higher).

The animated Perlin noise is rendered on the CPU. The base resolution of each octave is 32x32 pixels, and four octaves are stacked together in texture chunks with a resulting resolution of 256x256 pixels. Two of these chunks were used, for a total of eight octaves of noise.
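
The octave stacking itself is simple to illustrate. The following HLSL-style sketch (not from the demo; perlin is a hypothetical single-octave, tileable noise sampler) shows how four octaves are summed, with each octave doubling the spatial frequency and halving the amplitude:

sampler perlin; // hypothetical single-octave tileable noise texture

float fourOctaves(float2 p)
{
    float sum = 0;
    float amplitude = 0.5;
    for (int octave = 0; octave < 4; octave++)
    {
        sum += amplitude * tex2D(perlin, p).x;
        p *= 2;           // double the frequency
        amplitude *= 0.5; // halve the amplitude
    }
    return sum;
}
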
The plane was forced to the xz-plane for simplicity’s sake.
The vertices were (by default) calculated on a grid with a resolution of 65x129 vertices, resulting in 64x128x2 triangles. The line-plane intersections were only done at the corner vertices; the rest were interpolated using homogeneous coordinates. The height data was read with linear interpolation (but without mip-mapping) on the CPU before the vertex buffer was uploaded to the GPU as world-space coordinates.
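
The interpolation can be sketched as follows; the same construction appears in the normal-map vertex shader further down. The four corner rays of the camera frustum are intersected with the base plane, giving homogeneous points corner00..corner11, and interior grid vertices are interpolated bilinearly, deferring the divide by w until after interpolation so the result stays projectively correct:

float3 gridVertex(float2 uv, float4 corner00, float4 corner01,
                             float4 corner10, float4 corner11)
{
    float4 p = lerp(lerp(corner00, corner01, uv.x),
                    lerp(corner10, corner11, uv.x), uv.y);
    return p.xyz / p.w; // homogeneous divide after interpolation
}
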
The two chunks of Perlin noise were uploaded to the GPU, and the same height map was generated on the GPU but at a resolution of 512x1024 pixels. Aliasing was avoided by using mip-mapping. The normal map was generated from, and at the same resolution as, the height map.
A simple demo scene consisting of an island and a moving rubber duck was mirrored against Sbase and rendered to a texture for use as local reflections. The alpha channel was later used to determine which parts of the texture were transparent.
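
Since the base plane is the xz-plane, mirroring against Sbase is cheap: a reflection matrix that negates y, applied before the usual view-projection, is enough. A minimal sketch (assuming Sbase is y = 0, and using the same row-vector mul convention as the shaders below):

static const float4x4 mMirrorY =
{
    1,  0, 0, 0,
    0, -1, 0, 0,
    0,  0, 1, 0,
    0,  0, 0, 1
};
// reflected clip-space position of a world-space vertex:
// float4 posR = mul(mul(float4(worldPos, 1), mMirrorY), mViewProj);
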
A sky dome, the demo scene and the water surface were rendered to the back-buffer.

The water rendering was done with the following per-pixel computations:

  • Using the normal map and the view direction (at every point, not just the direction of the camera), the reflection vector was calculated.
  • The reflected part of the sky (RG) was sampled with a cube-map texture read.
  • The Fresnel reflectance (f) was obtained with a 1D texture read indexed by the dot product of the reflection vector and the normal.
  • The sunlight contribution (RS) was calculated with Phong shading using the normal map and a light-direction vector.
  • The local reflections (RL) were approximated (they cannot be done properly without ray tracing) by offsetting the texture coordinate with the xz-part of the normal.
  • The refraction colour (CW) was set to a constant colour.
  • The output colour was: C = (1-f)·CW + f·(RL.rgb·RL.a + (1-RL.a)·(RG+RS)) (see the lerp form below).
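
In the pixel shader below this blend appears as two nested lerps, which are algebraically equivalent:

// C = (1-f)·CW + f·(RL.rgb·RL.a + (1-RL.a)·(RG+RS)) written with lerp:
float3 reflected = lerp(RG + RS, RL.rgb, RL.a); // local reflection where its alpha allows
float3 C = lerp(CW, reflected, f);              // Fresnel blend against the water colour
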

A simpler implementation for the Nebula2 engine was also created. It is named nPGwater and should be available at the same location as the DX9 demo. It uses per-vertex normals and has no local reflections, but is otherwise similar to the DirectX 9 demo implementation.

Source Code

The Win32 demo application with textures and source can be downloaded from:
SOURCE CODE

HLSL code for water surface rendering

float4x4 mViewProj;
float4x4 mView;
float4   view_position;
float3   watercolour;
float    LODbias;
float    sun_alfa, sun_theta, sun_shininess, sun_strength;
float    reflrefr_offset;
bool     diffuseSkyRef;

texture EnvironmentMap, FresnelMap, Heightmap, Normalmap, Refractionmap, Reflectionmap;

struct VS_INPUT
{
    float3 Pos : POSITION;
    float3 Normal : NORMAL;
    float2 tc : TEXCOORD0;
};

struct VS_OUTPUT
{
    float4 Pos : POSITION;
    float2 tc : TEXCOORD0;
    float3 normal : TEXCOORD1;
    float3 viewvec : TEXCOORD2;
    float3 screenPos : TEXCOORD3;
    float3 sun : TEXCOORD5;
    float3 worldPos : TEXCOORD6;
};

samplerCUBE sky = sampler_state
{
    Texture = <EnvironmentMap>;
    MipFilter = NONE; MinFilter = LINEAR; MagFilter = LINEAR;
    AddressU = WRAP; AddressV = WRAP; AddressW = WRAP;
};

sampler fresnel = sampler_state
{
    Texture = <FresnelMap>;
    MipFilter = NONE; MinFilter = LINEAR; MagFilter = LINEAR;
    AddressU = CLAMP; AddressV = CLAMP;
};

sampler hmap = sampler_state
{
    Texture = <Heightmap>;
    MipFilter = LINEAR; MinFilter = LINEAR; MagFilter = LINEAR;
    AddressU = CLAMP; AddressV = CLAMP;
};

sampler nmap = sampler_state
{
    Texture = <Normalmap>;
    MipFilter = LINEAR; MinFilter = LINEAR; MagFilter = LINEAR;
    AddressU = CLAMP; AddressV = CLAMP;
};

sampler refrmap = sampler_state
{
    Texture = <Refractionmap>;
    MipFilter = LINEAR; MinFilter = LINEAR; MagFilter = LINEAR;
    AddressU = CLAMP; AddressV = CLAMP;
};

sampler reflmap = sampler_state
{
    Texture = <Reflectionmap>;
    MipFilter = LINEAR; MinFilter = LINEAR; MagFilter = LINEAR;
    AddressU = CLAMP; AddressV = CLAMP;
};

/* DX9 class shaders */

VS_OUTPUT VShaderR300(VS_INPUT i)
{
    VS_OUTPUT o;
    o.worldPos = i.Pos.xyz/4;
    o.Pos = mul(float4(i.Pos.xyz,1), mViewProj);
    o.normal = normalize(i.Normal.xyz);
    o.viewvec = normalize(i.Pos.xyz - view_position.xyz/view_position.w);

    o.tc = i.tc;

    // alt screenpos
    // this is the screenposition of the undisplaced vertices (assuming the plane is y=0)
    // it is used for the reflection/refraction lookup
    float4 tpos = mul(float4(i.Pos.x,0,i.Pos.z,1), mViewProj);
    o.screenPos = tpos.xyz/tpos.w;
    o.screenPos.xy = 0.5 + 0.5*o.screenPos.xy*float2(1,-1);
    o.screenPos.z = reflrefr_offset/o.screenPos.z; // reflrefr_offset controls the strength of the distortion

    // note: this should really be done on the CPU, as it isn't a per-vertex operation
    o.sun.x = cos(sun_theta)*sin(sun_alfa);
    o.sun.y = sin(sun_theta);
    o.sun.z = cos(sun_theta)*cos(sun_alfa);
    return o;
}

float4 PShaderR300(VS_OUTPUT i) : COLOR
{
    float4 ut;
    ut.a = 1;
    float3 v = i.viewvec;

    // depending on whether normals are provided per vertex or via a normal map, N is set differently
    //float3 N = i.normal;
    float3 N = 2*tex2D(nmap,i.tc).xyz-1; // unpack the stored normal from [0,1] to [-1,1]
    float3 R = normalize(reflect(v,N));
    R.y = max(R.y,0); // keep the reflection vector above the horizon
    float4 f = tex1D(fresnel,dot(R,N));
    float3 sunlight = pow(sun_strength*pow(saturate(dot(R, i.sun)),sun_shininess)*float3(1.2, 0.4, 0.1), 1/2.2);
    float4 refl = tex2D(reflmap,i.screenPos.xy-i.screenPos.z*N.xz);
    float3 skyrefl = texCUBE(sky,R).rgb;
    float3 col = lerp(skyrefl+sunlight,refl.rgb,refl.a);
    float3 refr = watercolour; // constant refraction colour
    //float3 refr = tex2D(refrmap,i.screenPos.xy-i.screenPos.z*N.xz).rgb; // a refraction-map lookup could be used instead
    ut.rgb = lerp(refr, col, f.r);
    return ut;
}

HLSL code for the heightmap generation (generates hmap)

float scale; // the xz-scale of the noise

struct VS_INPUT
{
    float3 Pos : POSITION;
    float2 tc : TEXCOORD0;
};

struct VS_OUTPUT
{
    float4 Pos : POSITION;
    float2 tc0 : TEXCOORD0;
    float2 tc1 : TEXCOORD1;
};

texture noise0;
texture noise1;

// samplers
sampler N0 = sampler_state
{
    texture = <noise0>;
    AddressU = WRAP;
    AddressV = WRAP;
    MIPFILTER = LINEAR;
    MINFILTER = LINEAR;
    MAGFILTER = LINEAR;
    MipMapLodBias = -1;
};

sampler N1 = sampler_state
{
    texture = <noise1>;
    AddressU = WRAP;
    AddressV = WRAP;
    MIPFILTER = LINEAR;
    MINFILTER = LINEAR;
    MAGFILTER = LINEAR;
    MipMapLodBias = -1;
};

VS_OUTPUT VShader(VS_INPUT i)
{
    VS_OUTPUT o;
    o.Pos = float4( i.tc.x*2-1,1-i.tc.y*2, 0, 1 );
    o.tc0 = scale*i.Pos.xz*0.015625; // 1/64: the low-frequency noise chunk
    o.tc1 = scale*i.Pos.xz*0.25;     // 1/4: the high-frequency noise chunk
    return o;
}

float4 PShader(VS_OUTPUT i) : COLOR
{
    return tex2D(N0, i.tc0) + tex2D(N1, i.tc1) - 0.5; // combine the two four-octave noise chunks
}

HLSL code for the normalmap generation (generates nmap)

float inv_mapsize_x,inv_mapsize_y;
float4 corner00, corner01, corner10, corner11;
float amplitude; // the amplitude of the noise.. this determine the strength of the normals

struct VS_INPUT
{
    float3 Pos : POSITION;
    float2 tc : TEXCOORD0;
};

struct VS_OUTPUT
{
    float4 Pos : POSITION;
    float2 tc : TEXCOORD0;
    float3 tc_p_dx : TEXCOORD1;
    float3 tc_p_dy : TEXCOORD2;
    float3 tc_m_dx : TEXCOORD3;
    float3 tc_m_dy : TEXCOORD4;
};

texture hmap;

sampler hsampler = sampler_state
{
    texture = <hmap>;
    AddressU = WRAP;
    AddressV = WRAP;
    MIPFILTER = NONE;
    MINFILTER = LINEAR;
    MAGFILTER = LINEAR;
};

VS_OUTPUT VShader(VS_INPUT i)
{
    VS_OUTPUT o;
    o.Pos = float4( i.tc.x*2-1,1-i.tc.y*2, 0, 1 );
    float scale = 1;

    float2 tc = i.tc + float2(-inv_mapsize_x*scale,0);
    float4 meh = lerp(lerp(corner00,corner01,tc.x),lerp(corner10,corner11,tc.x),tc.y);
    o.tc_m_dx = meh.xyz/meh.w;
    tc = i.tc + float2(+inv_mapsize_x*scale,0);
    meh = lerp(lerp(corner00,corner01,tc.x),lerp(corner10,corner11,tc.x),tc.y);
    o.tc_p_dx = meh.xyz/meh.w;
    tc = i.tc + float2(0,-inv_mapsize_y*scale);
    meh = lerp(lerp(corner00,corner01,tc.x),lerp(corner10,corner11,tc.x),tc.y);
    o.tc_m_dy = meh.xyz/meh.w;
    tc = i.tc + float2(0,inv_mapsize_y*scale);
    meh = lerp(lerp(corner00,corner01,tc.x),lerp(corner10,corner11,tc.x),tc.y);
    o.tc_p_dy = meh.xyz/meh.w;

    o.tc = i.tc;
    return o;
}

float4 PShader(VS_OUTPUT i) : COLOR
{
    float2 dx = {inv_mapsize_x,0},
        dy = {0,inv_mapsize_y};
    // displace the four neighbouring positions vertically by the sampled heights
    i.tc_p_dx.y = amplitude*tex2D(hsampler, i.tc+dx).x;
    i.tc_m_dx.y = amplitude*tex2D(hsampler, i.tc-dx).x;
    i.tc_p_dy.y = amplitude*tex2D(hsampler, i.tc+dy).x;
    i.tc_m_dy.y = amplitude*tex2D(hsampler, i.tc-dy).x;
    // central differences: the cross product of the two tangent vectors gives the normal
    float3 normal = normalize(-cross(i.tc_p_dx-i.tc_m_dx, i.tc_p_dy-i.tc_m_dy));
    return float4(0.5+0.5*normal,1); // pack [-1,1] into [0,1] for storage
}

Bibliography

1. Claes Johanson. Real-time water rendering: introducing the projected grid concept. Master's thesis, Lund University, 2004.