I have this vertex array of a cube:
float vertex_coordinates[] = {
    -12.43796, -12.43796,  12.43796,
    -12.43796,  12.43796,  12.43796,
     12.43796,  12.43796,  12.43796,
    // ... remaining vertices ...
};
Instead of ±12.43796, store ±1 in the array, then apply a glScalef of 12.43796 to your modelview matrix to blow the cube back up to its original size.
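For the cube that looks something like this (a minimal sketch using the first three vertices from your snippet; unit_cube and vertex_count are placeholder names):

static const GLfloat unit_cube[] = {
    -1.0f, -1.0f,  1.0f, // was -12.43796, -12.43796, 12.43796
    -1.0f,  1.0f,  1.0f,
     1.0f,  1.0f,  1.0f,
    // ... remaining vertices ...
};

glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glScalef(12.43796f, 12.43796f, 12.43796f); // restores the original size
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, unit_cube);
glDrawArrays(GL_TRIANGLES, 0, vertex_count);
glPopMatrix();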
I doubt this would speed up your code by itself, however. What it really buys you is that the coordinates now fit in a smaller type, so the vertex array shrinks to half of its original size once you store shorts instead of floats.
In a preprocessing step, we compute the per-axis min and max of the model and use them to spread the coordinates over the full range of a short, getting the most out of its 16 bits of precision:
#include <algorithm> // std::min, std::max
#include <cfloat>    // FLT_MAX

// size is the number of vertices; vertex_coordinates holds size * 3 floats.
float modelMin[3] = { FLT_MAX,  FLT_MAX,  FLT_MAX}; // or std::numeric_limits<float>::max()
float modelMax[3] = {-FLT_MAX, -FLT_MAX, -FLT_MAX};
for (int i = 0; i < size; ++i) {
    for (int j = 0; j < 3; ++j) {
        const float v = vertex_coordinates[i * 3 + j];
        modelMin[j] = std::min(modelMin[j], v);
        modelMax[j] = std::max(modelMax[j], v);
    }
}
#include <cmath> // floorf

short* short_coordinates = new short[size * 3];
for (int i = 0; i < size; ++i) {
    for (int j = 0; j < 3; ++j) {
        const float src = vertex_coordinates[i * 3 + j];
        short& dst = short_coordinates[i * 3 + j];
        // Map [modelMin, modelMax] onto [-32768, 32767], rounding to nearest.
        // Note 65535, not 65536: with 65536 the max coordinate would map to
        // 32768, which overflows a short.
        dst = (short)floorf((src - modelMin[j]) / (modelMax[j] - modelMin[j]) * 65535.0f - 32768.0f + 0.5f);
    }
}
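The scale and bias used at draw time are just the inverse of that mapping: from q = (src - modelMin) / (modelMax - modelMin) * 65535 - 32768 it follows that src = q * (modelMax - modelMin) / 65535 + (32768 / 65535) * (modelMax - modelMin) + modelMin, which is exactly what scale and bias compute below.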
And when drawing we do the following:
float scale[3], bias[3]; // not const: filled in below
for (int i = 0; i < 3; ++i) {
    scale[i] = (modelMax[i] - modelMin[i]) / 65535.0f;
    bias[i]  = (32768.0f / 65535.0f) * (modelMax[i] - modelMin[i]) + modelMin[i];
}

glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glTranslatef(bias[0], bias[1], bias[2]);
glScalef(scale[0], scale[1], scale[2]);

glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_SHORT, 0, short_coordinates);
glDrawArrays(GL_TRIANGLES, 0, size); // size = vertex count, as above
glPopMatrix();
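As a quick sanity check (not part of the original pipeline; modelMin/modelMax here stand in for a single axis), you can verify that the round trip through the short format is never off by more than half a quantization step:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const float modelMin = -12.43796f, modelMax = 12.43796f;
    const float scale = (modelMax - modelMin) / 65535.0f;
    const float bias  = (32768.0f / 65535.0f) * (modelMax - modelMin) + modelMin;

    float worst = 0.0f;
    for (float src = modelMin; src <= modelMax; src += 0.01f) {
        // Same quantization as in the preprocessing step above.
        const short q = (short)floorf((src - modelMin) / (modelMax - modelMin) * 65535.0f
                                      - 32768.0f + 0.5f);
        worst = std::max(worst, fabsf(q * scale + bias - src));
    }
    printf("worst error: %g, half a quantization step: %g\n", worst, 0.5f * scale);
    return 0;
}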
/A.B.