
I have an OpenGL program that works fine on an NVIDIA card, but produces an access violation and a GL_INVALID_OPERATION error at glDrawElements on an AMD GPU.

For starters, I have this geometry shader, which now won't compile:

#version 330

// Input is triangles, output is triangle strip.
layout(triangles) in;
layout(triangle_strip, max_vertices = 6) out;

// Data passed from vertex shader.
in VertexData
{
   vec3 normal;
   vec3 view;
}  inData[];

// Data passed to fragment shader. 
out FragData
{
   vec3 normal;
   vec3 view;
} frag;

// Uniforms.
uniform mat4 modelview_matrix;
uniform mat4 projection_matrix;


// Array of indexes for triangles to be split.
uniform int index[256];
uniform vec3 points[256];

uniform float split_factor;

uniform int tear;

void create_split ();
void make_face (vec4 a, vec4 b, vec4 c, vec3 normalA, vec3 viewA, vec3 normalB, vec3 viewB, vec3 normalC, vec3 viewC);

void create_split ()
{
   float f = gl_PrimitiveIDIn * split_factor;
   if ( f == 0.0f)
   {
       f = 0.2f;
   } else if ( f > 0.9f ) 
   {
       f = 0.9f;
   }

   vec4 midPointA = (gl_in[0].gl_Position + gl_in[1].gl_Position) * f ;
   vec4 midPointD = (gl_in[3].gl_Position + gl_in[4].gl_Position) * f;


   vec3 normalA = inData[0].normal;
   vec3 viewA = -midPointA.xyz;

   vec3 normalB = inData[1].normal;
   vec3 viewB = -midPointB.xyz;

   vec3 normalC = inData[2].normal;
   vec3 viewC = -midPointC.xyz;

   make_face (gl_in[0].gl_Position, midPointA, midPointD, inData[0].normal, inData[0].view, normalA, viewA, normalC, viewC);
   make_face (gl_in[0].gl_Position, midPointD, gl_in[4].gl_Position, inData[0].normal, inData[0].view, normalA, viewA, normalC, viewC);
}

void make_face (vec4 a, vec4 b, vec4 c, vec3 normalA, vec3 viewA, vec3 normalB, vec3 viewB, vec3 normalC, vec3 viewC)
{
   gl_Position = a;
   frag.normal = normalA;
   frag.view = viewA;
   EmitVertex();

   gl_Position = b;
   frag.normal = normalB;
   frag.view = viewB;
   EmitVertex();

   gl_Position = c;
   frag.normal = normalC;
   frag.view = viewC;
   EmitVertex();

   EndPrimitive();
}


void main()
{
    int i = 0;
    bool flag = false;

   if ( tear == 1 )
   {
      while(i < MAX && !flag)
      {
          if (gl_PrimitiveIDIn == index[i])
          {
              if (i > 0 && i < 256)
              {
                  if (index[i+1] - index[i] < 100)
                  {
                      create_split();
                      flag = true;
                  }
               }else if (i == 0 || i == 256)
               {
                  create_split();
                  flag = true;
               }

           }
           i++;
       }
   }


   i = 0;
   if (!flag)
   {
       for (i = 0; i < gl_in.length(); i++)
       {
           gl_Position = gl_in[i].gl_Position;
           frag.normal = inData[i].normal;
           frag.view = inData[i].view;
           EmitVertex();
           if ( i == 2)
           {
               EndPrimitive();
           }
       }
       EndPrimitive();
   }
}

This produces the following errors:

Compiling the geometry shader : _shaders/gs_smooth.glsl ...
Error: The shader _shaders/gs_smooth.glsl failed to compile.
Shader info log for GL index 4
Geometry shader failed to compile with the following errors:
ERROR: 0:55: error(#147) "[" array index out of range: '3'
ERROR: 0:55: error(#147) "[" array index out of range: '4'
ERROR: 0:82: error(#147) "[" array index out of range: '4'
ERROR: error(#273) 3 compilation errors.  No code generated

I've tried googling, but nothing relevant showed up. This error does not happen on the NVIDIA card.

The AMD card is an R9 270, while the NVIDIA card is a GTX 970.

Thanks.

mkanakis

1 Answer


https://www.khronos.org/opengl/wiki/Geometry_Shader

With an input of triangles, gl_in has a vertex count of 3. This is also covered in section 4.3.8.1 of the version 3.30 GLSL specification (I'm citing this version specifically because of your #version 330):

All geometry shader input unsized array declarations will be sized by an earlier input layout qualifier, when present, as per the following table.

Layout: triangles, Size of Input Arrays: 3

The intrinsically declared input array gl_in[] will also be sized by any input layout declaration. Hence, the expression gl_in.length() will return the value from the table above.
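
To illustrate (a standalone sketch, not code from your shader), the input layout qualifier alone determines how many elements gl_in has; the sizes below are from the spec table cited above:

layout(triangles) in;              // gl_in.length() == 3; valid indices are 0..2
// layout(lines) in;               // would size gl_in to 2 instead
// layout(triangles_adjacency) in; // would size gl_in to 6 instead

Indexing gl_in with a constant expression outside that range is a compile-time error on a conforming implementation.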

You're using the triangles input layout declaration, which sizes gl_in to 3:

layout(triangles) in;

But you're attempting to access elements 3 and 4:

vec4 midPointD = (gl_in[3].gl_Position + gl_in[4].gl_Position) * f;

And:

make_face (gl_in[0].gl_Position, midPointD, gl_in[4].gl_Position, ...

So you're overflowing your GS input, and the AMD behaviour is actually the more correct one: NVIDIA is being excessively permissive and accepting code that violates the specification. You should correct your code.
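
For reference, here is a minimal sketch of one way to keep the split inside the three available vertices. The choice of edges (0-1 and 1-2) and the use of mix() for the interpolation are my assumptions about the intent, so adapt them as needed:

// Sketch only: with layout(triangles) in, gl_in[0..2] are the only valid
// inputs, so both split points are taken from those three vertices.
void create_split ()
{
    // Clamp f into [0.2, 0.9], close to the intent of the original if/else chain.
    float f = clamp(gl_PrimitiveIDIn * split_factor, 0.2, 0.9);

    // Interpolated points on edges 0-1 and 1-2 (assumed edges).
    vec4 midPointA = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, f);
    vec4 midPointB = mix(gl_in[1].gl_Position, gl_in[2].gl_Position, f);

    make_face(gl_in[0].gl_Position, midPointA, midPointB,
              inData[0].normal, inData[0].view,
              inData[1].normal, -midPointA.xyz,
              inData[2].normal, -midPointB.xyz);
    make_face(gl_in[0].gl_Position, midPointB, gl_in[2].gl_Position,
              inData[0].normal, inData[0].view,
              inData[1].normal, -midPointB.xyz,
              inData[2].normal, inData[2].view);
}

Alternatively, if this shader was originally written against six input vertices, declaring layout(triangles_adjacency) in; (and drawing with a matching adjacency primitive mode) would make gl_in[3] and gl_in[4] legal.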

Maximus Minimus
  • You are correct. Still, back to the original problem: I was trying to see why glDraw with TRIANGLES_ADJACENCY was throwing a bad access at a read location, and I changed it to TRIANGLES but forgot about the error which you just found and solved. So regarding this error, yes, this is the solution. I will open a new question for the other one. Thanks. – mkanakis Dec 22 '16 at 22:42