
I use C# with OpenTK as my OpenGL binding. My Vertex struct consists of six doubles and one float:

public struct Vertex
{
    //Positions
    public double X;
    public double Y;
    public double Z;
    //ContourEdge
    public float IsContourEdge;
    //Normals
    public double NX;
    public double NY;
    public double NZ;
    public Vertex(float x, float y, float z) : this()
    {
        X = x;
        Y = y;
        Z = z;
        IsContourEdge = 0f;
    }

    public Vertex(float x, float y, float z, float nX, float nY, float nZ) : this()
    {
        X = x;
        Y = y;
        Z = z;
        NX = nX;
        NY = nY;
        NZ = nZ;
        IsContourEdge = 0f;
    }
    public static int IsContourEdgeOffset()
    {
        return sizeof(double) * 3;
    }
    public static int NormalOffset()
    {
        return sizeof(double) * 4;
    }
}

And I render my vertices like this:

//Bind vao, vbo and ebo
GL.BindVertexArray(bufferObject.Vao);

GL.BindBuffer(BufferTarget.ArrayBuffer, bufferObject.Vbo);
GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(vertices.Length * Marshal.SizeOf(typeof(Vertex))), vertices, BufferUsage);

// Vertex Positions
GL.EnableVertexAttribArray(0);
GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Double, false, Marshal.SizeOf(typeof(Vertex)), IntPtr.Zero);
// Is Contour Edge
GL.EnableVertexAttribArray(1);
GL.VertexAttribPointer(1, 1, VertexAttribPointerType.Float, false, Marshal.SizeOf(typeof(Vertex)), Vertex.IsContourEdgeOffset());
// Vertex Normals
if (hasNormals)
{
    GL.EnableVertexAttribArray(2);
    GL.VertexAttribPointer(2, 3, VertexAttribPointerType.Double, false, Marshal.SizeOf(typeof(Vertex)), Vertex.NormalOffset());
}

//Unlink buffers
GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
GL.BindVertexArray(0);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, 0);

Everything works fine here. But the offset of the normals doesn't make sense to me. In my Vertex struct I wrote:

public static int NormalOffset()
{
    return sizeof(double) * 4;
}

But shouldn't it be

public static int NormalOffset()
{
    return 3 * sizeof(double) + sizeof(float);
}

since IsContourEdge is a float and I also pass it as a float (VertexAttribPointerType.Float) to my vertex shader?

Anyway, if I use this NormalOffset instead of the previous one, the normals no longer work. The only explanation I can think of is that OpenGL expands a float to the size of a double. Is that right?

Just for completeness, here is the vertex shader code:

#version 330 core
layout (location = 0) in vec3 position; //Vertices and normals are in world coordinate space
layout (location = 1) in float isContourEdge;
layout (location = 2) in vec3 normal;

out VS_OUT {
    vec3 Normal;
    vec3 FragPos;
    vec3 VertWorldNormal;
    vec3 VertWorldPos;
    float ExcludeEdge;
} vs_out;

uniform mat4 view;
uniform mat4 proj;

void main()
{
    gl_Position = proj * view *  vec4(position, 1.0f);
    vs_out.FragPos = vec3(view * vec4(position, 1.0f));
    vs_out.Normal = vec3(view * vec4(normal, 0));
    vs_out.ExcludeEdge = isContourEdge;

    vs_out.VertWorldNormal = normal;
    vs_out.VertWorldPos = position;
} 

1 Answer


Welcome to the world of padding. Doubles are 8-byte aligned, so when a 4-byte float is placed between two doubles, padding is inserted to keep the next double aligned to an 8-byte boundary. That is why your normals start at offset 32 (4 * sizeof(double)) rather than at 28 (3 * sizeof(double) + sizeof(float)).
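
For illustration, here is a small check (just a sketch, assuming the Vertex struct from your question with its default sequential layout on a 64-bit runtime) that prints the marshalled field offsets and makes the 4 bytes of padding after IsContourEdge visible:

// Sketch: print the marshalled layout of the Vertex struct from the question.
using System;
using System.Runtime.InteropServices;

Console.WriteLine(Marshal.OffsetOf(typeof(Vertex), "X"));             // 0
Console.WriteLine(Marshal.OffsetOf(typeof(Vertex), "IsContourEdge")); // 24
Console.WriteLine(Marshal.OffsetOf(typeof(Vertex), "NX"));            // 32, not 28: 4 bytes of padding
Console.WriteLine(Marshal.SizeOf(typeof(Vertex)));                    // 56 bytes total, not 52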

You can make sure the offset is always correct (even when alignment changes in future versions) by using the Marshal.OffsetOf(Type, String) method.
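
For example, the two offset helpers in your struct could be written like this (a sketch using the field names from your question; Marshal lives in System.Runtime.InteropServices, which your rendering code already uses):

public static int IsContourEdgeOffset()
{
    // Let the runtime compute the real offset, padding included.
    return (int)Marshal.OffsetOf(typeof(Vertex), "IsContourEdge");
}

public static int NormalOffset()
{
    // Offset of the first normal component (NX), i.e. 32 with the current layout.
    return (int)Marshal.OffsetOf(typeof(Vertex), "NX");
}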
