The original article was first published on the WeChat official account Byteflow.
OpenGL ES 3D model loading and rendering
The previous section briefly introduced the data structure of Obj, a commonly used 3D model file format, and how to compile the model-loading library Assimp. This section mainly introduces how to use Assimp to load a 3D model file and render the model.
A 3D model is generally assembled from many small models that are spliced and combined into one complete large model. Each small model is an independent rendering unit; we call these small models meshes (Mesh).
As an independent rendering unit, a mesh should contain at least one set of vertex data, where each vertex holds a position vector, a normal vector, and a texture coordinate. Besides the texture coordinates, we also need to specify the texture (material) the mesh maps to, as well as the vertex indices used when drawing.
We can define a vertex for the Mesh:
struct Vertex {
    // Position vector
    glm::vec3 Position;
    // Normal vector
    glm::vec3 Normal;
    // Texture coordinates
    glm::vec2 TexCoords;
};
We also need a structure that describes the texture information:
struct Texture
{
    GLuint id;   // Texture ID, created in the OpenGL environment
    string type; // Texture type (diffuse or specular)
};
A mesh, as an independent rendering unit, needs to contain at least one set of vertex data as well as the vertex indices and textures. It can be defined as follows:
class Mesh
{
public:
    vector<Vertex> vertices;  // A set of vertices
    vector<GLuint> indices;   // The indices corresponding to the vertices
    vector<Texture> textures; // Textures
    Mesh(vector<Vertex> vertices, vector<GLuint> indices, vector<Texture> textures);
    void Draw(Shader shader);
private:
    GLuint VAO, VBO, EBO;
    void initMesh();
    void Destroy();
};
In initMesh we create the corresponding VAO, VBO, and EBO, initialize the buffers, and set up the vertex attribute pointers.
Mesh(vector<Vertex> vertices, vector<GLuint> indices, vector<Texture> textures)
{
    this->vertices = vertices;
    this->indices = indices;
    this->textures = textures;
    this->initMesh();
}
void initMesh()
{
    // Generate VAO, VBO, EBO
    glGenVertexArrays(1, &this->VAO);
    glGenBuffers(1, &this->VBO);
    glGenBuffers(1, &this->EBO);
    // Initialize the buffers
    glBindVertexArray(this->VAO);
    glBindBuffer(GL_ARRAY_BUFFER, this->VBO);
    glBufferData(GL_ARRAY_BUFFER, this->vertices.size() * sizeof(Vertex),
                 &this->vertices[0], GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this->EBO);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, this->indices.size() * sizeof(GLuint),
                 &this->indices[0], GL_STATIC_DRAW);
    // Set the vertex position pointer
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                          (GLvoid*)0);
    // Set the normal pointer
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                          (GLvoid*)offsetof(Vertex, Normal));
    // Set the texture coordinate pointer
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                          (GLvoid*)offsetof(Vertex, TexCoords));
    glBindVertexArray(0);
}
// Destroy the textures and buffer objects
void Destroy()
{
    for (int i = 0; i < textures.size(); ++i) {
        glDeleteTextures(1, &textures[i].id);
    }
    glDeleteBuffers(1, &EBO);
    glDeleteBuffers(1, &VBO);
    glDeleteVertexArrays(1, &VAO);
    VAO = EBO = VBO = GL_NONE;
}
The offsetof macro calculates the byte offset of a structure member: it takes the structure type as its first argument and the member name as its second, and returns the offset of that member from the beginning of the structure. For example, offsetof(Vertex, Normal) returns 12 bytes, i.e. 3 * sizeof(float), since Normal comes right after the vec3 Position member.
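As a quick sanity check (my addition, not part of the original article), these offsets can be verified at compile time, assuming the Vertex struct defined above and the usual tightly packed glm vector types:

#include <cstddef>     // offsetof
#include <glm/glm.hpp> // glm::vec3 / glm::vec2

// Same Vertex layout as defined above
struct Vertex {
    glm::vec3 Position;
    glm::vec3 Normal;
    glm::vec2 TexCoords;
};

// Position starts at offset 0, Normal after one vec3, TexCoords after two vec3s
static_assert(offsetof(Vertex, Position)  == 0,                 "unexpected Vertex layout");
static_assert(offsetof(Vertex, Normal)    == 3 * sizeof(float), "unexpected Vertex layout");
static_assert(offsetof(Vertex, TexCoords) == 6 * sizeof(float), "unexpected Vertex layout");

If any of these assertions failed (for example, because of padding), the attribute pointers set in initMesh would read the wrong bytes, so checking them once costs nothing.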
The vertex shader we use (simplified):
#version 300 es
layout (location = 0) in vec3 a_position;
layout (location = 1) in vec3 a_normal;
layout (location = 2) in vec2 a_texCoord;
out vec2 v_texCoord;
uniform mat4 u_MVPMatrix;
void main()
{
    v_texCoord = a_texCoord;
    vec4 position = vec4(a_position, 1.0);
    gl_Position = u_MVPMatrix * position;
}
The fragment shader varies depending on the number and type of textures used. If there is only one diffuse texture, the fragment shader looks like this:
#version 300 es
precision mediump float;
out vec4 outColor;
in vec2 v_texCoord;
uniform sampler2D texture_diffuse1;
void main()
{
    outColor = texture(texture_diffuse1, v_texCoord);
}
If we have 3 diffuse textures and 3 specular textures in a mesh, the sampler declarations in the corresponding fragment shader are as follows:
uniform sampler2D texture_diffuse1;
uniform sampler2D texture_diffuse2;
uniform sampler2D texture_diffuse3;
uniform sampler2D texture_specular1;
uniform sampler2D texture_specular2;
uniform sampler2D texture_specular3;
To sum up, we need to use different fragment shaders and vertex shaders depending on the number and type of textures in the Mesh and the lighting requirements of the model.
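For illustration only (this is not a shader used by the article), a fragment shader that samples one diffuse and one specular texture under this naming convention might look like the following; how the two samples are combined is an arbitrary choice here:

#version 300 es
precision mediump float;
in vec2 v_texCoord;
out vec4 outColor;
uniform sampler2D texture_diffuse1;
uniform sampler2D texture_specular1;
void main()
{
    vec4 diffuseColor  = texture(texture_diffuse1, v_texCoord);
    vec4 specularColor = texture(texture_specular1, v_texCoord);
    // Simple additive mix; a real lighting model would weight these terms
    // using the normal and the light/view directions.
    outColor = diffuseColor + 0.2 * specularColor;
}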
Mesh rendering logic:
// Render the mesh
void Draw(Shader shader)
{
    unsigned int diffuseNr = 1;
    unsigned int specularNr = 1;
    // Iterate over each texture and derive the sampler variable name from the texture's type and index
    for(unsigned int i = 0; i < textures.size(); i++)
    {
        glActiveTexture(GL_TEXTURE0 + i); // activate the proper texture unit before binding
        string number;
        string name = textures[i].type;
        if(name == "texture_diffuse")
            number = std::to_string(diffuseNr++);
        else if(name == "texture_specular")
            number = std::to_string(specularNr++); // convert the unsigned int counter to a string
        glUniform1i(glGetUniformLocation(shader.ID, (name + number).c_str()), i);
        // and finally bind the texture
        glBindTexture(GL_TEXTURE_2D, textures[i].id);
    }
    // Draw the mesh
    glBindVertexArray(VAO);
    glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
    glBindVertexArray(0);
    glActiveTexture(GL_TEXTURE0);
}
The Shader class simply wraps shader program creation and destruction, plus setting uniform variables.
class Shader
{
public:
    unsigned int ID; // ID of the shader program
    Shader(const char* vertexStr, const char* fragmentStr)
    {
        // Create the shader program
        ID = GLUtils::CreateProgram(vertexStr, fragmentStr);
    }
    void Destroy()
    {
        // Destroy the shader program
        GLUtils::DeleteProgram(ID);
    }
    void use()
    {
        glUseProgram(ID);
    }
    void setFloat(const std::string &name, float value) const
    {
        glUniform1f(glGetUniformLocation(ID, name.c_str()), value);
    }
    void setVec3(const std::string &name, float x, float y, float z) const
    {
        glUniform3f(glGetUniformLocation(ID, name.c_str()), x, y, z);
    }
    void setMat4(const std::string &name, const glm::mat4 &mat) const
    {
        glUniformMatrix4fv(glGetUniformLocation(ID, name.c_str()), 1, GL_FALSE, &mat[0][0]);
    }
};
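The Shader class above only wraps float, vec3, and mat4 uniforms, which is why Mesh::Draw calls glUniform1i directly. A setInt helper (my addition, not part of the original class) would let sampler uniforms go through the same wrapper:

// Hypothetical helper, following the same pattern as setFloat/setVec3/setMat4
void setInt(const std::string &name, int value) const
{
    glUniform1i(glGetUniformLocation(ID, name.c_str()), value);
}

With it, the uniform call in Draw could be written as shader.setInt(name + number, i).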
As we saw earlier, a model contains many meshes, and each mesh is rendered independently; together they form the whole model. The Model class can be defined as follows:
class Model
{
public:
    Model(GLchar* path)
    {
        loadModel(path);
    }
    // Render the model, i.e. render each mesh in turn
    void Draw(Shader shader);
    void Destroy();
private:
    // The meshes contained in the model
    vector<Mesh> meshes;
    // Model file directory
    string directory;
    void loadModel(string path);
    void processNode(aiNode* node, const aiScene* scene);
    Mesh processMesh(aiMesh* mesh, const aiScene* scene);
    vector<Texture> loadMaterialTextures(aiMaterial* mat, aiTextureType type, string typeName);
};
Loading a 3D model with Assimp is relatively simple: Assimp loads the model into an aiScene object that it defines. In addition to the meshes and materials, the aiScene contains an aiNode object (the root node); we then need to traverse the nodes recursively and collect the meshes referenced by each node.
#include "assimp/Importer.hpp"
#include "assimp/scene.h"
#include "assimp/postprocess.h"
Assimp::Importer importer;
const aiScene* scene = importer.ReadFile(path, aiProcess_Triangulate | aiProcess_FlipUVs);
The aiProcess_Triangulate flag tells Assimp that, if the model is not (entirely) made up of triangles, it should convert all of the model's primitives to triangles; aiProcess_FlipUVs flips the texture coordinates along the Y axis during processing.
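Assimp offers many other post-processing flags that can be ORed together in the same way. As a sketch (the extra flags below are my choice, not ones used by the article), one could also generate smooth normals for meshes that lack them and merge duplicate vertices:

// Optional extra post-processing (illustrative only)
const aiScene* scene = importer.ReadFile(path,
        aiProcess_Triangulate
      | aiProcess_FlipUVs
      | aiProcess_GenSmoothNormals        // generate normals if the mesh has none
      | aiProcess_JoinIdenticalVertices); // merge duplicate vertices to shrink the index buffer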
Functions in the Model class to load the Model:
void loadModel(string const &path)
{
    Assimp::Importer importer;
    const aiScene* scene = importer.ReadFile(path, aiProcess_Triangulate | aiProcess_FlipUVs);
    if (!scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode) // check for errors
    {
        LOGCATE("Model::loadModel path=%s, assimpError=%s", path.c_str(), importer.GetErrorString());
        return;
    }
    directory = path.substr(0, path.find_last_of('/'));
    // Process the nodes recursively, starting from the root node
    processNode(scene->mRootNode, scene);
}
// Process all nodes recursively
void processNode(aiNode *node, const aiScene *scene)
{
    // Process each mesh referenced by the current node
    for(unsigned int i = 0; i < node->mNumMeshes; i++)
    {
        aiMesh* mesh = scene->mMeshes[node->mMeshes[i]];
        if(mesh != nullptr)
            meshes.push_back(processMesh(mesh, scene));
    }
    // Then recurse into the child nodes
    for(unsigned int i = 0; i < node->mNumChildren; i++)
    {
        processNode(node->mChildren[i], scene);
    }
}
// Generate a Mesh
Mesh processMesh(aiMesh* mesh, const aiScene* scene)
{
    vector<Vertex> vertices;
    vector<unsigned int> indices;
    vector<Texture> textures;
    // Handle the vertices
    for(GLuint i = 0; i < mesh->mNumVertices; i++)
    {
        Vertex vertex;
        // Handle vertex coordinates, normals, and texture coordinates here (omitted)
        vertices.push_back(vertex);
    }
    // Handle the vertex indices
    for(unsigned int i = 0; i < mesh->mNumFaces; i++)
    {
        aiFace face = mesh->mFaces[i];
        for(unsigned int j = 0; j < face.mNumIndices; j++)
            indices.push_back(face.mIndices[j]);
    }
    // Handle materials
    if(mesh->mMaterialIndex >= 0)
    {
        aiMaterial* material = scene->mMaterials[mesh->mMaterialIndex];
        vector<Texture> diffuseMaps = loadMaterialTextures(material, aiTextureType_DIFFUSE, "texture_diffuse");
        textures.insert(textures.end(), diffuseMaps.begin(), diffuseMaps.end());
        // ... (other texture types, e.g. specular maps, are loaded in the same way)
    }
    return Mesh(vertices, indices, textures);
}
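The article does not show the body of loadMaterialTextures. A minimal sketch based on the Assimp material API (GetTextureCount / GetTexture) and the TextureFromFile helper shown below might look like this; the real implementation in the repo may additionally cache textures that have already been loaded:

vector<Texture> loadMaterialTextures(aiMaterial* mat, aiTextureType type, string typeName)
{
    vector<Texture> textures;
    for(unsigned int i = 0; i < mat->GetTextureCount(type); i++)
    {
        // Query the texture file path, relative to the model directory
        aiString str;
        mat->GetTexture(type, i, &str);
        Texture texture;
        texture.id = TextureFromFile(str.C_Str(), directory); // decode and upload to GL
        texture.type = typeName; // "texture_diffuse" / "texture_specular", drives sampler naming in Mesh::Draw
        textures.push_back(texture);
    }
    return textures;
}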
When loading textures in the native layer, we use OpenCV to decode the image and generate the texture object:
unsigned int TextureFromFile(const char *path, const string &directory)
{
    string filename = string(path);
    filename = directory + '/' + filename;
    unsigned int textureID;
    glGenTextures(1, &textureID);
    LOGCATE("TextureFromFile Loading texture %s", filename.c_str());
    // Use OpenCV to decode the image
    cv::Mat textureImage = cv::imread(filename);
    if (!textureImage.empty())
    {
        // OpenCV decodes to BGR by default; convert to RGB here
        cv::cvtColor(textureImage, textureImage, CV_BGR2RGB);
        glBindTexture(GL_TEXTURE_2D, textureID);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, textureImage.cols,
                     textureImage.rows, 0, GL_RGB, GL_UNSIGNED_BYTE,
                     textureImage.data);
        glGenerateMipmap(GL_TEXTURE_2D);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        GO_CHECK_GL_ERROR();
    } else {
        LOGCATE("TextureFromFile Texture failed to load at path: %s", path);
    }
    return textureID;
}
Drawing the model simply means traversing all the meshes and drawing each one:
void Draw(Shader shader)
{
    for(unsigned int i = 0; i < meshes.size(); i++)
        meshes[i].Draw(shader);
}
Finally, here is an example of how to use the Model class:
// Initialize and load the model
m_pModel = new Model("/sdcard/model/poly/Apricot_02_hi_poly.obj");
m_pShader = new Shader(vShaderStr, fShaderStr);

// Draw the model
glClearColor(0.5f, 0.5f, 0.5f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
UpdateMVPMatrix(m_MVPMatrix, m_AngleX, m_AngleY, (float)screenW / screenH);
m_pShader->use();
m_pShader->setMat4("u_MVPMatrix", m_MVPMatrix);
m_pModel->Draw((*m_pShader));

// Destroy the objects
if (m_pModel != nullptr) {
    m_pModel->Destroy();
    delete m_pModel;
    m_pModel = nullptr;
}
if (m_pShader != nullptr) {
    m_pShader->Destroy();
    delete m_pShader;
    m_pShader = nullptr;
}
Implementation code path: NDK_OpenGLES_3_0