WebRTC series

  1. WebRTC source research (1) WebRTC architecture

  2. WebRTC source code research (2) WebRTC source directory structure

  3. WebRTC source research (3) WebRTC operation mechanism

  4. WebRTC source research (4) Web server working principle and common protocol basis

  5. WebRTC source code research (5) Nodejs build environment

  6. WebRTC source research (6) Create a simple HTTP service

  7. WebRTC source code research (7)

  8. WebRTC source code research (8)

  9. WebRTC source code research (9)

  10. WebRTC source code research (10)

  11. WebRTC source research (11) access to audio and video equipment

  12. WebRTC source research (12) video constraints

  13. WebRTC source code study (13) audio constraints

  14. WebRTC source research (14) video effects

  15. WebRTC source study (15) get the figure from the video

  16. WebRTC source code study (16) only audio data acquisition

  17. WebRTC source research (17) MediaStreamAPI get video constraints

  18. WebRTC source research (18) WebRTC recording video principle

  19. WebRTC source research (19) WebRTC record acquisition plane data

  20. WebRTC source code research (20) WebRTC signaling server principle

  21. WebRTC source code research (21) WebRTC signaling server implementation

WebRTC source research (19) WebRTC record acquisition plane data

1. WebRTC collection API

There is an API in WebRTC that can be used to get the desktop: getDisplayMedia

var promise = navigator.mediaDevices.getDisplayMedia(constraints);
Copy the code

The constraints parameter is optional.

Constraints are the same as in the getUserMedia function.

2. Collect screen data

Collecting screen (desktop) data: this feature is an experimental feature in Chrome, so it is only available in recent versions of the browser.

To do this, open the browser and set chrome://flags/#enable-experimental-web-platform-features

As shown below:

Let’s look at the specific JS code as follows:

'use strict';

// <select> elements for choosing audio input/output and video input devices
const audioSource = document.querySelector('select#audioSource');
const audioOutput = document.querySelector('select#audioOutput');
const videoSource = document.querySelector('select#videoSource');
// The <video> element that plays the live captured stream
const videoplay = document.querySelector('video#player');
// The <audio> element (unused while audio capture is disabled)
const audioplay = document.querySelector('audio#audioplayer');

// <div> where the effective video constraints are displayed as JSON
const divConstraints = document.querySelector('div#constraints');

// Recording state: `buffer` collects recorded data chunks and
// `mediaRecorder` holds the active MediaRecorder; both are assigned
// in startRecord(), so they must be reassignable (let, not const).
let buffer;
let mediaRecorder;

// Recorded-video playback element and the record/play/download buttons
const recvideo = document.querySelector('video#recplayer');
const btnRecord = document.querySelector('button#record');
const btnPlay = document.querySelector('button#recplay');
const btnDownload = document.querySelector('button#download');

// Filter (CSS class) effect selection
const filtersSelect = document.querySelector('select#filter');

// Snapshot button and the canvas that receives the captured video frame
const snapshot = document.querySelector('button#snapshot');
const picture = document.querySelector('canvas#picture');
picture.width = 640;
picture.height = 480;
 
// deviceInfos is an array of device information (from enumerateDevices).
// For each device, create an <option> and append it to the <select>
// that matches the device kind.
function gotDevices(deviceInfos){
  deviceInfos.forEach(function(deviceinfo){
    // Create one <option> per device
    const option = document.createElement('option');
    option.text = deviceinfo.label;
    option.value = deviceinfo.deviceId;

    if(deviceinfo.kind === 'audioinput') { // Audio input
      audioSource.appendChild(option);
    }else if(deviceinfo.kind === 'audiooutput') { // Audio output
      audioOutput.appendChild(option);
    }else if(deviceinfo.kind === 'videoinput') { // Video input
      videoSource.appendChild(option);
    }
  });
}
// What do we do with the stream? In gotMediaStream we pass a parameter, the stream,
// This stream actually contains both audio and video tracks, because we set video and audio to be captured via Constraints
// Let's directly assign this stream to the HTML assigned video tag
// The user has agreed to access the audio/video device
// The user has agreed to access the audio/video device. Assign the
// captured stream to the <video> element so it plays, display the
// effective video constraints, then chain into device enumeration.
function gotMediaStream(stream){
  // audioplay.srcObject = stream;
  videoplay.srcObject = stream; // The video tag plays straight from the stream
  // Take the first of the stream's video tracks (usually the only one)
  const videoTrack = stream.getVideoTracks()[0];
  // getSettings() reports the constraint values actually in effect
  const videoConstraints = videoTrack.getSettings();
  // Pretty-print the settings: second argument is no replacer,
  // third argument is a 2-space indent (the original `null.2` was a
  // typo for the two arguments `null, 2`)
  divConstraints.textContent = JSON.stringify(videoConstraints, null, 2);

  // Keep the stream globally so startRecord() can reach it later
  window.stream = stream;

  // enumerateDevices() returns a Promise, so the caller can chain .then(gotDevices)
  return navigator.mediaDevices.enumerateDevices();
}
 
// Promise rejection handler for the media-capture chain: just log it.
function handleError(error) {
  console.log('getUserMedia error:', error);
}
// Check whether the browser supports getDisplayMedia; if so, capture
// the desktop and chain the stream into gotMediaStream and gotDevices.
function start() {
  // Check whether screen recording is supported
  if(!navigator.mediaDevices || !navigator.mediaDevices.getDisplayMedia){
    console.log('getDisplayMedia is not supported! ');
  }else{
    // Constraint parameters; normally only `video` and `audio` are needed.
    // `video` may also be an object, e.g.
    //   { width: 640, height: 480, facingMode: 'environment',
    //     deviceId: deviceId ? deviceId : undefined }
    const constraints = {
      video: true,  // Capture the screen's video
      audio: false  // No audio capture in this demo
    };
    // Call the screen-capture API; the resulting stream carries desktop data
    navigator.mediaDevices.getDisplayMedia(constraints)
      .then(gotMediaStream) // Fetching the stream succeeded
      .then(gotDevices)     // Then refresh the device lists
      .catch(handleError);
  }
}

start();
 
// Re-run start() whenever a different camera is selected, so the
// capture is restarted with the newly chosen device.
videoSource.onchange = start;

// Apply the chosen CSS filter class to the live video element.
filtersSelect.onchange = () => {
  videoplay.className = filtersSelect.value;
};
 
// Click the button to grab the current video frame onto the canvas.
snapshot.onclick = function() {
  // Apply the same CSS filter to the snapshot as to the live video
  picture.className = filtersSelect.value;
  // drawImage(source, dx, dy, dWidth, dHeight): draw the current frame
  // of the <video> at (0, 0), scaled to the canvas size. The original
  // passed `0.0` — a typo for the two arguments `0, 0`.
  picture.getContext('2d').drawImage(videoplay, 0, 0, picture.width, picture.height);
};
// Fired repeatedly by MediaRecorder while recording; each event carries
// one chunk of recorded data, which we append to `buffer` (the array
// created in startRecord()).
function handleDataAvailable(e){
  if(e && e.data && e.data.size > 0){ // Ignore empty chunks
    buffer.push(e.data);
  }
}
// Begin recording window.stream with MediaRecorder.
function startRecord(){
  buffer = []; // Recorded data chunks are collected here by handleDataAvailable

  const options = {
    mimeType: 'video/webm; codecs=vp8' // Record video with vp8 encoding
  };
  // Check whether this mimeType is supported by the browser
  if(!MediaRecorder.isTypeSupported(options.mimeType)){
    console.error(`${options.mimeType} is not supported!`);
    return;
  }
  try{ // Constructing a MediaRecorder can throw (e.g. inactive stream)
    // mediaRecorder is a script-level variable so stopRecord() can use it;
    // window.stream was saved in gotMediaStream
    mediaRecorder = new MediaRecorder(window.stream, options);
  }catch(e){
    console.error('Failed to create MediaRecorder:', e);
    return;
  }
  // Receive recorded data chunks as they become available
  mediaRecorder.ondataavailable = handleDataAvailable;
  mediaRecorder.start(10); // Emit one data chunk every 10 ms time slice
}
// Stop the in-progress MediaRecorder session; by this point
// handleDataAvailable has been filling `buffer` with chunks.
function stopRecord() {
  mediaRecorder.stop();
}
 
// Toggle recording when the Record button is clicked; the button's
// label doubles as the current state.
btnRecord.onclick = () => {
  if(btnRecord.textContent === 'Start Record') { // Start recording
    startRecord();
    btnRecord.textContent = 'Stop Record'; // Flip the button label
    btnPlay.disabled = true;     // Cannot play while recording
    btnDownload.disabled = true; // Cannot download while recording
  }else{ // End recording
    stopRecord();
    btnRecord.textContent = 'Start Record';
    btnPlay.disabled = false;     // Playback allowed once recording stops
    btnDownload.disabled = false; // Download allowed once recording stops
  }
};
// Click Play video
// Play back the recording: wrap the collected chunks in a Blob and
// feed its object URL to the playback <video> element.
btnPlay.onclick = () => {
  const blob = new Blob(buffer, {type: 'video/webm'});
  recvideo.src = window.URL.createObjectURL(blob);
  recvideo.srcObject = null; // src and srcObject are mutually exclusive
  recvideo.controls = true;
  recvideo.play();
};
 
// Download the video: build a Blob URL for the recorded chunks and
// click a hidden temporary <a download> link.
btnDownload.onclick = () => {
  const blob = new Blob(buffer, {type: 'video/webm'});
  const url = window.URL.createObjectURL(blob);
  const a = document.createElement('a');

  a.href = url;
  a.style.display = 'none';
  a.download = 'aaa.webm';
  a.click();
};
Copy the code