Recently, I implemented a screen recording feature on top of WebRTC video streaming, essentially using the native MediaRecorder API.

For details about MediaRecorder, see the documentation: MediaRecorder

Problems encountered and how they were solved

The progress bar does not appear the first time a WebM video is played; the progress bar (video duration) only shows up from the second playback onward.

Chrome has officially marked this as Won't Fix, so Chrome presumably does not consider it a bug. Because the recorded file's header does not contain the video duration, the browser has to read the entire file before it can determine the length.

Solution:

Manually calculate the video duration and write it into the Blob.

Use the fix-webm-duration library to fill in the duration field. You have to track the duration yourself, so it is not perfectly accurate (the error can still exceed 1s), but it is minimally invasive and easy to adopt.
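As a minimal sketch of this second approach (assuming a MediaRecorder instance `recorder` and a `chunks: Blob[]` array already exist; `handleFixedBlob` is a hypothetical callback):

```tsx
import ysFixWebmDuration from 'fix-webm-duration'

let startTime = 0
let durationMs = 0

recorder.onstart = () => {
  startTime = Date.now() // track the duration yourself
}

recorder.onstop = () => {
  durationMs = Date.now() - startTime
  const rawBlob = new Blob(chunks, { type: 'video/webm' })
  // Write the measured duration into the WebM header
  ysFixWebmDuration(rawBlob, durationMs, (fixedBlob: Blob) => {
    handleFixedBlob(fixedBlob) // hypothetical: play or upload the fixed blob
  })
}
```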

Even after the progress bar is fixed, the WebM video still cannot automatically receive focus, so the keyboard's left and right arrow keys cannot be used to seek forward and backward.

For normal videos, calling the native video element's focus() method is enough to enable seeking with the left and right arrow keys. WebM videos recorded this way do not support it natively, even once the progress bar is fixed.

Solution:

Set currentTime in JS: first set the playback position to the end, then back to the beginning. This simulates a completed playback and fixes fast forward/backward with the keyboard's left and right arrow keys.

```tsx
// Fix WebM video keyboard focus and arrow-key seeking
useEffect(() => {
  const videoEle = document.querySelector(
    '#video-homework-popup',
  ) as HTMLVideoElement
  const duration = videoEle?.duration
  if (typeof duration === 'number' && !isNaN(duration)) {
    // Jump to the end and back to simulate a completed playback
    videoEle.currentTime = duration
    videoEle.currentTime = 0
  }
  videoEle?.focus()
  videoEle?.play()
}, [homeworkVideoUrl])
```

Extracting useVideoRecorder

The screen recording here does not call the computer's camera, nor does it use the screen-sharing API. It is based on the remote video: a canvas continuously redraws the video frames, and the canvas's captured stream is fed into MediaRecorder.
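Conceptually, the pipeline is: draw the remote video onto a hidden canvas, capture the canvas as a stream, and hand that stream to MediaRecorder. A minimal sketch (assuming `remoteVideo` is the remote HTMLVideoElement; sizing, watermarking, and cleanup are omitted):

```tsx
const canvas = document.createElement('canvas')
const ctx = canvas.getContext('2d')!

// captureStream(0) pushes a frame only when requestFrame() is called
const stream = canvas.captureStream(0)
const track = stream.getVideoTracks()[0] as MediaStreamTrack & {
  requestFrame: () => void
}

const recorder = new MediaRecorder(stream, {
  mimeType: 'video/webm; codecs=vp8',
})
recorder.start()

setInterval(() => {
  canvas.width = remoteVideo.videoWidth
  canvas.height = remoteVideo.videoHeight
  ctx.drawImage(remoteVideo, 0, 0, canvas.width, canvas.height) // redraw the current frame
  track.requestFrame() // push the freshly drawn frame into the stream
}, 16)
```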

The following is the pure front-end recording code with the business logic stripped out. There is certainly room for optimization; it is for reference only:

```tsx
import React, { useEffect, useRef } from 'react'
import throttle from 'lodash/throttle'
import ysFixWebmDuration from 'fix-webm-duration'

const TimeInterval = 16
const DefaultMaxRecordMinutes = 15 // The maximum recording duration is about 15 minutes by default
const WatermarkParams = {
  width: 118,
  height: 42,
  marginRight: 25,
  marginTop: 17,
}

enum BitsPerSecond {
  '360P' = 1000000,
  '480P' = 2500000,
  '720P' = 5000000,
  '1080P' = 8000000,
}

interface RecorderOptions {
  videoRef: React.MutableRefObject<HTMLVideoElement | null> // The video element
  videoContainerRef: React.MutableRefObject<HTMLDivElement | null> // The div wrapping the video element
  watermark?: string
  maxRecordMinutes?: number // Maximum recording duration (minutes)
  debug?: boolean
  getResolution: () => { width: number; height: number }
}

interface StartRecorderOptions {
  bitrate?: number
}

type CanvasCaptureMediaStreamTrack = MediaStreamTrack & {
  requestFrame: () => void
}

// Current state of the recorder
enum RecordingState {
  INACTIVE = 'inactive', // Not recording: either never started or already stopped
  PAUSED = 'paused', // Recording has started and is currently paused
  RECORDING = 'recording', // Recording is in progress
}

const useVideoRecorder = ({
  videoRef,
  videoContainerRef,
  watermark,
  maxRecordMinutes = DefaultMaxRecordMinutes,
  debug,
  getResolution,
}: RecorderOptions) => {
  const recorder = useRef<MediaRecorder>()
  const recorderCanvas = useRef<HTMLCanvasElement>()
  const recorderChunks = useRef<Blob[]>([])
  const recorderStream = useRef<MediaStream | null>(null)
  const recorderVideoTrack = useRef<CanvasCaptureMediaStreamTrack>()
  const recorderContext = useRef<CanvasRenderingContext2D>()

  const watermarkImage = useRef<HTMLImageElement>()
  const cursorImage = useRef<HTMLImageElement>()
  const cursorContainer = useRef<HTMLDivElement>()
  const mousePosition = useRef<{ x: number; y: number }>({ x: 0, y: 0 })

  const refreshTimer = useRef<number>()
  const refreshTicks = useRef<number>(0)
  // Used to enforce the maximum recording duration
  const recordTimer = useRef<number>()
  const durationTicks = useRef<number>(0)
  // Used to calculate the actual recording duration
  const startRecordTime = useRef<number>(0)
  const durationTime = useRef<number>(0)

  const isRecording = useRef<boolean>(false)

  // Create the canvas on initialization
  useEffect(() => {
    recorderCanvas.current = document.createElement('canvas')
    const $recorderCanvas = recorderCanvas.current
    $recorderCanvas.setAttribute('style', 'display: none')
    $recorderCanvas.id = 'video-recorder-canvas'
    recorderContext.current = ($recorderCanvas.getContext(
      '2d',
    ) as unknown) as CanvasRenderingContext2D
    // Show the canvas while debugging
    debug &&
      recorderCanvas.current.setAttribute(
        'style',
        'display: block; position: fixed; bottom: 0; left: 0; height: 350px; background: #fff; z-index: 10; border: 1px solid #fff',
      )
    document.body.appendChild(recorderCanvas.current)
    // Watermark
    watermarkImage.current = document.createElement('img')
    watermark && watermarkImage.current.setAttribute('src', watermark)
    // Mouse cursor
    cursorImage.current = document.createElement('img')
    cursorContainer.current = document.createElement('div')
    cursorContainer.current.setAttribute(
      'style',
      'pointer-events: none; z-index: 100; display: inline-block; position: absolute;',
    )
    cursorContainer.current.appendChild(cursorImage.current)
  }, [])

  useEffect(() => {
    videoContainerRef.current?.addEventListener('mousemove', handleMousemove)

    return () => {
      videoContainerRef.current?.removeEventListener(
        'mousemove',
        handleMousemove,
      )
    }
  }, [])

  // Reset the recording if the network goes offline
  useEffect(() => {
    window.addEventListener('offline', resetVideoRecord)

    return () => {
      window.removeEventListener('offline', resetVideoRecord)
    }
  }, [])

  const handleMousemove = throttle((e: MouseEvent) => {
    mousePosition.current.x = e.offsetX
    mousePosition.current.y = e.offsetY
  }, 16)

  const onRefreshTimer = () => {
    refreshTicks.current++
    // Draw a frame roughly every 64ms (about 15fps)
    if (
      isRecording.current &&
      refreshTicks.current % Math.round(64 / TimeInterval) === 0
    ) {
      recorderVideoTrack.current?.requestFrame()
      recorderDrawFrame()
    }
  }

  // Track the elapsed recording time
  const onRecordTimer = () => {
    durationTicks.current++
    if (durationTicks.current >= maxRecordMinutes * 60) {
      pauseRecord()
    }
  }

  const recorderDrawFrame = () => {
    const $recorderCanvas = recorderCanvas.current!
    const $player = videoRef.current!
    const ctx = recorderContext.current!
    const { width, height } = getResolution() // Get the video's current width and height
    $recorderCanvas.width = width // $player.videoWidth
    $recorderCanvas.height = height // $player.videoHeight

    ctx.drawImage(
      $player,
      0,
      0,
      $player.videoWidth,
      $player.videoHeight,
      0,
      0,
      $recorderCanvas.width,
      $recorderCanvas.height,
    )
    drawWatermark(ctx, width)
  }

  // Draw the watermark. The watermark image must be in base64 format
  const drawWatermark = (
    ctx: CanvasRenderingContext2D,
    canvasWidth: number,
  ) => {
    if (watermark) {
      ctx.drawImage(
        watermarkImage.current!,
        canvasWidth - WatermarkParams.width - WatermarkParams.marginRight,
        WatermarkParams.marginTop,
      )
    }
  }

  // Start recording
  const startRecord = (options: StartRecorderOptions = {}) => {
    if (
      recorder.current?.state === RecordingState.RECORDING ||
      recorder.current?.state === RecordingState.PAUSED
    ) {
      return
    }

    console.log('start record')
    recorderStream.current = recorderCanvas.current!.captureStream(0)
    recorderVideoTrack.current = recorderStream.current!.getVideoTracks()[0] as CanvasCaptureMediaStreamTrack
    const audioTrack = (videoRef.current
      ?.srcObject as MediaStream | null)?.getAudioTracks()[0]
    if (audioTrack) {
      recorderStream.current!.addTrack(audioTrack) // Mix in the audio
    }

    if (!window.MediaRecorder) {
      return false
    }

    const mimeType = 'video/webm; codecs=vp8'
    recorder.current = new MediaRecorder(recorderStream.current, {
      mimeType,
      // Overall bit rate for audio plus video
      bitsPerSecond: options.bitrate || BitsPerSecond['360P'],
    })
    isRecording.current = true
    refreshTimer.current = window.setInterval(onRefreshTimer, TimeInterval)
    recordTimer.current = window.setInterval(onRecordTimer, 1000)
    recorder.current.ondataavailable = handleRecordData // Fires periodically (and on stop) with a Blob of recorded data
    recorder.current.start(10000) // Start recording, emitting a data chunk every 10s
    startRecordTime.current = Date.now()
  }

  // Pause recording - used when the maximum recording duration is exceeded
  const pauseRecord = () => {
    if (
      recorder.current &&
      recorder.current?.state === RecordingState.RECORDING
    ) {
      recorder.current.pause()
      isRecording.current = false
      clearInterval(recordTimer.current)
      clearInterval(refreshTimer.current)
      durationTime.current = Date.now() - startRecordTime.current
    }
  }

  // Stop recording
  const stopRecord = () => {
    return new Promise((resolve, reject) => {
      if (
        recorder.current?.state === RecordingState.RECORDING ||
        recorder.current?.state === RecordingState.PAUSED
      ) {
        console.log('stop record')
        if (!window.MediaRecorder) {
          reject(new Error('Your browser does not support the MediaRecorder API'))
        }
        recorder.current?.stop()
        recorderVideoTrack.current!.stop()
        clearInterval(refreshTimer.current)
        clearInterval(recordTimer.current)
        isRecording.current = false
        recorder.current.onstop = () => {
          if (!durationTime.current) {
            durationTime.current = Date.now() - startRecordTime.current
          }

          // Fix the WebM recording duration by writing it into the Blob
          ysFixWebmDuration(
            new Blob(recorderChunks.current, { type: 'video/webm' }),
            durationTime.current,
            function (fixedBlob: Blob) {
              resolve(fixedBlob)
              recorderChunks.current = []
              durationTime.current = 0
            },
          )
        }
      } else {
        reject(new Error('Recorder is not started'))
      }
    })
  }

  const resetVideoRecord = () => {
    if (
      recorder.current?.state === RecordingState.RECORDING ||
      recorder.current?.state === RecordingState.PAUSED
    ) {
      recorder.current?.stop()
      recorderVideoTrack.current!.stop()
      recorder.current.onstop = () => {
        recorderChunks.current = []
        recorderStream.current = null
      }
    }
    isRecording.current = false
    clearInterval(refreshTimer.current)
    clearInterval(recordTimer.current)
  }

  // Collect the recorded stream data
  const handleRecordData = (e: BlobEvent) => {
    if (e.data.size > 0 && recorderChunks.current) {
      recorderChunks.current.push(e.data)
    }
  }

  // Download the video
  const download = (blob: Blob) => {
    if (recorder.current && blob.size > 0) {
      const name = new Date().getTime()
      const a = document.createElement('a')
      a.href = URL.createObjectURL(blob)
      a.download = `${name}.webm`
      document.body.appendChild(a)
      a.click()
    }
  }

  return {
    startRecord,
    stopRecord,
    resetVideoRecord,
    download,
  }
}

export default useVideoRecorder
```
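For reference, a hypothetical consumer of the hook might look like this (the component, the resolution, and the bitrate are illustrative assumptions, not from the original code; wiring the remote stream into videoRef is out of scope):

```tsx
import React, { useRef } from 'react'
import useVideoRecorder from './useVideoRecorder'

// Hypothetical consumer component
const RecordPanel = () => {
  const videoRef = useRef<HTMLVideoElement | null>(null)
  const videoContainerRef = useRef<HTMLDivElement | null>(null)

  const { startRecord, stopRecord, download } = useVideoRecorder({
    videoRef,
    videoContainerRef,
    getResolution: () => ({ width: 1280, height: 720 }), // assumed resolution
  })

  const handleStop = async () => {
    const fixedBlob = (await stopRecord()) as Blob
    download(fixedBlob) // or upload it instead
  }

  return (
    <div ref={videoContainerRef}>
      <video ref={videoRef} autoPlay />
      <button onClick={() => startRecord({ bitrate: 2500000 })}>Start</button>
      <button onClick={handleStop}>Stop</button>
    </div>
  )
}

export default RecordPanel
```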

Compatibility

If you want to do front-end recording, you need to consider compatibility issues; a quick feature-detection sketch follows the list below.

MediaRecorder API

  • Compatibility with older versions of Safari (mainly with the built-in WeChat browser on Mac in mind)

WebM format
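A minimal feature-detection sketch (the fallback behavior is an assumption):

```tsx
// Detect MediaRecorder plus WebM/VP8 support before enabling recording
const canRecordWebm = (): boolean =>
  typeof window.MediaRecorder !== 'undefined' &&
  MediaRecorder.isTypeSupported('video/webm; codecs=vp8')

if (!canRecordWebm()) {
  // Assumed fallback: hide the record entry point or record server-side
}
```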