preface
A while back, leadership asked me to build a demo of fluid particles. I had recently been looking at fluid and smoke effects myself, so I was excited when the task came up — I had never implemented one, and the effect looks great, so my mind immediately started working through the whole process. One of the hard problems is that each fluid particle has its own velocity and coordinates: if I used JS to compute and update all of that data, performance would suffer badly. While traveling to attend a summit (on business), I kept thinking about how to solve it, and it suddenly occurred to me that I could use the GPU to generate an image that encodes the data, and then feed that data back to the corresponding particles. The performance turned out to be excellent — I was thrilled.
Demo address (the online demo uses only 60,000 particles)
The performance comparison
The next two performance-comparison screenshots were captured with the CPU throttled 6×. In the first, the fluid particles' positions are computed in JS (when JS updates every position, the total particle count has to stay small or the animation stutters); the profiler shows that the yellow (JS) region occupies a large share of every frame and the frame rate is uneven. The second screenshot is my approach: with the computation moved to the GPU, the yellow region of each frame is tiny, and the frame rate stays uniform even with 1,000,000 particles.
Train of thought
Pattern noise
In each data image, region b (velocity) is generated by combining the two vector fields (c & d) with the particle's own velocity; region a (position) only needs to combine the current coordinates with the velocity to produce new coordinates; region c is a noise image generated from the elapsed time; and region d is computed from the incoming touch coordinates and touch velocity.
After the data image is generated, the process of updating the particle positions begins. The idea is to look up, for each incoming particle index, the RGB value of the corresponding texel in region a of the data image — that RGB value is the position of the particle with that index. Once we have the position, the rest is routine point drawing.
Stepping back to review the entire process: the JS logic is minimal — it only creates the particle indices and, each frame, passes in the time required by region c plus the touch position and velocity vector required by region d. Everything else is done on the GPU, which is why the performance profile shows almost no yellow (JS) area, and performance stays stable and efficient.
Data picture implementation
Js part
- Create the FBO and bind the FBO to texture
// Create the FBO and attach a half-float RGB texture as its color buffer.
// NEAREST filtering + CLAMP_TO_EDGE so data texels are read back exactly,
// with no interpolation or wrapping between regions of the data image.
const frameBuffer = this.gl.createFramebuffer();
const texture = this.gl.createTexture();
this.gl.bindFramebuffer(this.gl.FRAMEBUFFER, frameBuffer);
this.gl.activeTexture(this.gl.TEXTURE0);
this.gl.bindTexture(this.gl.TEXTURE_2D, texture);
// HALF_FLOAT_OES requires the OES_texture_half_float extension (this.halfFloat)
this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGB, size, size, 0, this.gl.RGB, this.halfFloat.HALF_FLOAT_OES, null);
this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.NEAREST);
this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.NEAREST);
this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);
this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);
this.gl.framebufferTexture2D(this.gl.FRAMEBUFFER, this.gl.COLOR_ATTACHMENT0, this.gl.TEXTURE_2D, texture, 0);
this.gl.bindFramebuffer(this.gl.FRAMEBUFFER, null);
Copy the code
- Bind the FBO on every frame and draw the data image into it
// Render the data image: bind the FBO so the fragment shader's output
// goes into the data texture instead of the screen, draw a full-screen
// quad (TRIANGLE_STRIP of 4 vertices), then unbind to restore the default framebuffer.
this.gl.useProgram(this.vectorData.program);
this.gl.bindFramebuffer(this.gl.FRAMEBUFFER, frameBuffer);
this.gl.clear(this.gl.COLOR_BUFFER_BIT);
// ... (set uniforms for this frame here)
this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);
this.gl.bindFramebuffer(this.gl.FRAMEBUFFER, null);
Copy the code
- Once the data image is drawn, pass it to the particle program as a texture
// Draw the particles: the data image is sampled in the particle vertex
// shader (by particle index) to find each particle's position.
this.gl.useProgram(this.particleData.program);
this.gl.activeTexture(this.gl.TEXTURE0);
// Pass in the data image on texture unit 0
this.gl.uniform1i(this.gl.getUniformLocation(this.particleData.program, 'uTexture'), 0);
this.gl.bindTexture(this.gl.TEXTURE_2D, this.vectorTexture);
// Pass in the particle index data (one vec2 per particle)
this.gl.bindBuffer(this.gl.ARRAY_BUFFER, this.particleData.buffer);
this.gl.vertexAttribPointer(this.particleData.aPosition, 2, this.gl.FLOAT, false, 0, 0);
this.gl.drawArrays(this.gl.POINTS, 0, this.particleLength);
Copy the code
Shader part — the data image is produced mainly by a fragment shader that sets the color of each pixel
#ifdef GL_ES
precision highp float;
#endif
#define PI 3.14159265358979323846
#define OCTAVE_NUM 5
#define SIZE 128.
uniform sampler2D uTexture;  // data image from the previous frame (feedback texture)
uniform float uTime;         // elapsed time
uniform vec4 uPointer;       // xy: touch coordinates, zw: touch velocity vector
varying vec2 vPosition;      // position passed in from the vertex shader, in [0,1]
const float pixelSize = 1./SIZE;

// Pseudo-random 2D vector in [0, .5) derived from p.
vec2 random(in vec2 p){
    return fract(
        sin(
            vec2(
                dot(p, vec2(3.3, 6.1)),
                dot(p, vec2(5.7, 4.7)))) * .5
    );
}

// Gradient-style 2D noise, remapped to be centered around .5.
float noise(in vec2 p){
    vec2 i = floor(p);
    vec2 f = fract(p);
    float a = dot(random(i), f);
    float b = dot(random(i + vec2(1., 0.)), f - vec2(1., 0.));
    float c = dot(random(i + vec2(0., 1.)), f - vec2(0., 1.));
    float d = dot(random(i + vec2(1., 1.)), f - vec2(1., 1.));
    vec2 u = smoothstep(0., 1., f);
    return mix(mix(a, b, u.x), mix(c, d, u.x), u.y) + .5;
}

// Sample the c vector field (time-driven noise) at a particle position,
// bilinearly interpolating by hand; result is centered on 0.
vec4 randomRate(in vec3 pos){
    vec3 _pos = pos * vec3(.5, .5, 1.);
    vec3 pixelPos = _pos * SIZE;
    _pos.y += .5;  // region c lives in the top-left quadrant of the data image
    vec3 f = fract(pixelPos);
    // Offset toward the lower-left corner, because the lower-right has a problem when f == 1
    vec3 a = texture2D(uTexture, _pos.xy).xyz;
    vec3 b = texture2D(uTexture, _pos.xy + vec2(-pixelSize, 0.)).xyz;
    vec3 c = texture2D(uTexture, _pos.xy + vec2(0., pixelSize)).xyz;
    vec3 d = texture2D(uTexture, _pos.xy + vec2(-pixelSize, pixelSize)).xyz;
    vec3 u = smoothstep(0., 1., f);
    vec3 _mix = mix(mix(a, b, u.x), mix(c, d, u.x), u.y);
    _mix -= .5;  // stored biased by .5; recenter on 0
    return vec4(_mix, 1.);
}

// Sample the d vector field (touch-driven) at a particle position,
// bilinearly interpolating by hand; result is centered on 0.
vec4 touchRate(in vec3 pos){
    vec3 _pos = pos * vec3(.5, .5, 1.);
    vec3 pixelPos = _pos * SIZE;
    _pos.y += .5;  // region d lives in the top-right quadrant
    _pos.x += .5;
    vec3 f = fract(pixelPos);
    vec3 a = texture2D(uTexture, _pos.xy).xyz;
    vec3 b = texture2D(uTexture, _pos.xy + vec2(pixelSize, 0.)).xyz;
    vec3 c = texture2D(uTexture, _pos.xy + vec2(0., pixelSize)).xyz;
    vec3 d = texture2D(uTexture, _pos.xy + vec2(pixelSize)).xyz;
    vec3 u = smoothstep(0., 1., f);
    vec3 _mix = mix(mix(a, b, u.x), mix(c, d, u.x), u.y);
    _mix -= .5;
    return vec4(_mix, 1.);
}

void main(){
    vec4 color = texture2D(uTexture, vPosition);
    if(vPosition.y < .5){
        if(vPosition.x < .5){
            // Region a (bottom-left): xyz = particle position stored as RGB.
            // Velocity comes from region b, which sits .5 to the right.
            vec4 v = texture2D(uTexture, vPosition + vec2(.5, 0.)) - .5;
            v /= pow(2., 12.);
            v.xy *= (1. - cos(color.z * PI * 1.5)) / 3.;
            if(color.z + v.z <= 0.){
                // Reset the position, to solve the problem of particles
                // coinciding after running for a while
                gl_FragColor = vec4(vPosition * 2., 1., 1.);
            }else{
                gl_FragColor = fract(color + v);
            }
        }else{
            // Region b (bottom-right): velocity field.
            // Get the current particle coordinates from region a (.5 to the left).
            vec3 currentPos = texture2D(uTexture, vPosition + vec2(-.5, 0.)).xyz;
            gl_FragColor = color + (randomRate(currentPos) + touchRate(currentPos)) * 512.;
            gl_FragColor = .5 + (gl_FragColor - .5) / 5.;
        }
    }else{
        vec2 offset = vec2(sin(uTime / 27.) * sin(uTime), cos(uTime / 17.) * cos(uTime) * .5);
        gl_FragColor = vec4(0.5, 0.5, 0.5, 1.);
        if(vPosition.x < .5){
            // Region c (top-left): time-driven noise vector field
            gl_FragColor.rg = vec2(noise(vPosition * 4. + offset * 2.3), noise(vPosition * 3. + offset.yx));
            gl_FragColor.b = 4.;
        }else{
            // Region d (top-right): touch-driven vector field
            vec2 _point = uPointer.xy / 2. + vec2(.5);
            float _len = distance(_point, vPosition);
            // NOTE(review): constant garbled in source; 99. as written, but a
            // decay factor like .99 may have been intended — verify against the demo
            vec2 _color = (color.xy - vec2(.5)) * 99.;
            if(length(_color) < .04){
                _color = vec2(0.);
            }
            gl_FragColor.rg = _color;
            // Inject the touch velocity around the pointer, falling off with distance
            if(uPointer.x >= 0. && _len < .05){
                gl_FragColor.rg += (1. - _len / .05) * uPointer.zw;
            }
            gl_FragColor.rg += vec2(.5);  // re-bias into [0,1] for storage
        }
    }
    gl_FragColor.a = 1.;
}
Copy the code
That's quite a lot of code ~ If anything is unclear, leave a comment and I'll explain. Rendering to a texture lets you move logic onto the GPU, which makes smooth effects like this possible.