Recording is usually a mobile feature, but what if you also need hold-to-talk on the PC? The project requirement: hold a button to speak for no more than 60 seconds, then generate a voice file and upload it. I used recorder.js.
1. Create a recorder.js file in the project with the following content (you can also find a version of it on Baidu):
// Compatibility shims
window.URL = window.URL || window.webkitURL
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia
let HZRecorder = function (stream, config) {
config = config || {}
config.sampleBits = config.sampleBits || 8 // sampling bits: 8 or 16
config.sampleRate = config.sampleRate || (44100 / 6) // sample rate (1/6 of 44100)
let context = new (window.webkitAudioContext || window.AudioContext)()
let audioInput = context.createMediaStreamSource(stream)
let createScript = context.createScriptProcessor || context.createJavaScriptNode
let recorder = createScript.apply(context, [4096, 1, 1])
let audioData = {
size: 0, // length of the recorded data
buffer: [], // recording buffer
inputSampleRate: context.sampleRate, // input sample rate
inputSampleBits: 16, // input sampling bits: 8 or 16
outputSampleRate: config.sampleRate, // output sample rate
outputSampleBits: config.sampleBits, // output sampling bits: 8 or 16
input: function (data) {
this.buffer.push(new Float32Array(data))
this.size += data.length
},
compress: function () { // merge compression
// merge
let data = new Float32Array(this.size)
let offset = 0
for (let i = 0; i < this.buffer.length; i++) {
data.set(this.buffer[i], offset)
offset += this.buffer[i].length
}
// compress
let compression = parseInt(this.inputSampleRate / this.outputSampleRate)
let length = data.length / compression
let result = new Float32Array(length)
let index = 0; let j = 0
while (index < length) {
result[index] = data[j]
j += compression
index++
}
return result
},
encodeWAV: function () {
let sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate)
let sampleBits = Math.min(this.inputSampleBits, this.outputSampleBits)
let bytes = this.compress()
let dataLength = bytes.length * (sampleBits / 8)
let buffer = new ArrayBuffer(44 + dataLength)
let data = new DataView(buffer)
let channelCount = 1 // mono
let offset = 0
let writeString = function (str) {
for (let i = 0; i < str.length; i++) {
data.setUint8(offset + i, str.charCodeAt(i))
}
}
// 'RIFF' chunk identifier (Resource Interchange File Format)
writeString('RIFF'); offset += 4
// Total bytes from the next address to the end of the file, i.e. file size -8
data.setUint32(offset, 36 + dataLength, true); offset += 4
// WAV file flag
writeString('WAVE'); offset += 4
// Waveform format flag
writeString('fmt '); offset += 4
// fmt chunk size, 0x10 = 16 for PCM
data.setUint32(offset, 16, true); offset += 4
// Format category (PCM sampled data)
data.setUint16(offset, 1, true); offset += 2
// number of channels
data.setUint16(offset, channelCount, true); offset += 2
// The sampling rate, the number of samples per second, represents the playback speed of each channel
data.setUint32(offset, sampleRate, true); offset += 4
// byte rate (average bytes per second): channels x sample rate x bits per sample / 8
data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4
// block align (bytes per sample frame): channels x bits per sample / 8
data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2
// Data bits per sample
data.setUint16(offset, sampleBits, true); offset += 2
// Data identifier
writeString('data'); offset += 4
// total size of the sampled data, i.e. file size - 44
data.setUint32(offset, dataLength, true); offset += 4
// Write the sampled data
if (sampleBits === 8) {
for (let i = 0; i < bytes.length; i++, offset++) {
let s = Math.max(-1, Math.min(1, bytes[i]))
let val = s < 0 ? s * 0x8000 : s * 0x7FFF
val = parseInt(255 / (65535 / (val + 32768)))
data.setInt8(offset, val)
}
} else {
for (let i = 0; i < bytes.length; i++, offset += 2) {
let s = Math.max(-1, Math.min(1, bytes[i]))
data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
}
}
// note: the buffer holds WAV data even though the Blob is labelled audio/mp3 here
return new Blob([data], { type: 'audio/mp3' })
}
}
// Start recording
this.start = function () {
audioInput.connect(recorder)
recorder.connect(context.destination)
}
// Stop recording
this.stop = function () {
recorder.disconnect()
}
// Get the audio file
this.getBlob = function () {
this.stop()
return audioData.encodeWAV()
}
// Playback: also wires up the download link (expects an element with id "downloadRec")
this.play = function (audio) {
let downRec = document.getElementById('downloadRec')
downRec.href = window.URL.createObjectURL(this.getBlob())
downRec.download = new Date().toLocaleString() + '.mp3'
audio.src = window.URL.createObjectURL(this.getBlob())
}
// Upload
this.upload = function (url, callback) {
let fd = new FormData()
fd.append('audioData', this.getBlob())
let xhr = new XMLHttpRequest()
/* eslint-disable */
if (callback) {
xhr.upload.addEventListener('progress', function (e) {
callback('uploading', e)
}, false)
xhr.addEventListener('load', function (e) {
callback('ok', e)
}, false)
xhr.addEventListener('error', function (e) {
callback('error', e)
}, false)
xhr.addEventListener('abort', function (e) {
callback('cancel', e)
}, false)
}
/* eslint-enable */
xhr.open('POST', url)
xhr.send(fd)
}
// Audio capture
recorder.onaudioprocess = function (e) {
audioData.input(e.inputBuffer.getChannelData(0))
// record(e.inputBuffer.getChannelData(0));
}
}
// Throw an exception
HZRecorder.throwError = function (message) {
alert(message)
throw new function () { this.toString = function () { return message } }()
}
// Whether recording is supported
HZRecorder.canRecording = (navigator.getUserMedia != null)
// Get the recorder
HZRecorder.get = function (callback, config) {
if (callback) {
if (navigator.getUserMedia) {
navigator.getUserMedia(
{ audio: true } // Enable audio only
, function (stream) {
let rec = new HZRecorder(stream, config)
callback(rec)
}
, function (error) {
switch (error.code || error.name) {
case 'PERMISSION_DENIED':
case 'PermissionDeniedError':
HZRecorder.throwError('The user refused to provide information. ')
break
case 'NOT_SUPPORTED_ERROR':
case 'NotSupportedError':
HZRecorder.throwError('Browsers do not support hardware devices. ')
break
case 'MANDATORY_UNSATISFIED_ERROR':
case 'MandatoryUnsatisfiedError':
HZRecorder.throwError('The specified hardware device could not be found. ')
break
default:
HZRecorder.throwError('Unable to open microphone. Exception message :' + (error.code || error.name))
break
}
})
} else {
HZRecorder.throwError('The current browser does not support recording.')
}
}
}
export default HZRecorder
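Before wiring it into a component, here is the recorder's API on its own, as a minimal sketch based only on the code above (the upload URL is a placeholder):
// Minimal usage sketch of HZRecorder; '/api/upload-voice' is a placeholder URL
import HZRecorder from '@/js/recorder/recorder.js'
HZRecorder.get(rec => {
rec.start() // begin capturing microphone input
setTimeout(() => {
rec.stop() // stop after 3 seconds (just an example)
let blob = rec.getBlob() // WAV data wrapped in a Blob
console.log('recorded bytes:', blob.size)
rec.upload('/api/upload-voice', (state, e) => {
console.log('upload state:', state) // 'uploading' | 'ok' | 'error' | 'cancel'
})
}, 3000)
})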
2. Use the recorder in a Vue component as follows:
<template>
<div class="wrap">
<el-form v-model="form">
<el-form-item>
<input type="button" class="btn-record-voice" @mousedown.prevent="mouseStart" @mouseup.prevent="mouseEnd" v-model="form.time"/>
<audio v-if="form.audioUrl" :src="form.audioUrl" controls="controls" class="content-audio" style="display: block;">voice</audio>
</el-form-item>
</el-form>
</div>
</template>
<script>
// import recorder.js
import recording from '@/js/recorder/recorder.js'
export default {
data() {
return {
form: {
time: 'Hold to speak (60 seconds)',
audioUrl: ''
},
num: 60, // hold-to-speak countdown (seconds)
recorder: null,
interval: null, // countdown timer handle
audioFileList: [], // list of uploaded voice files
startTime: '', // recording start time
endTime: '' // recording end time
}
},
mounted() {
// Request recording permission up front
this.$nextTick(() => {
recording.get(rec => {
this.recorder = rec
})
})
},
methods: {
// Clear the timer
clearTimer () {
if (this.interval) {
this.num = 60
clearInterval(this.interval)
}
},
// Long press to speak
mouseStart () {
this.clearTimer()
this.startTime = new Date().getTime()
recording.get((rec) => {
// On the first press the browser asks for microphone permission, so check that a recorder was actually returned
if (rec) {
this.recorder = rec
this.interval = setInterval(() => {
if (this.num <= 0) {
this.recorder.stop()
this.num = 60
this.clearTimer()
} else {
this.num--
this.form.time = 'Release to end (' + this.num + ' seconds)'
this.recorder.start()
}
}, 1000)
}
})
},
// Upload voice when released
mouseEnd () {
this.clearTimer()
this.endTime = new Date().getTime()
if (this.recorder) {
this.recorder.stop()
// Reset the talking time
this.num = 60
this.form.time = 'Hold to speak (' + this.num + ' seconds)'
// Get the voice binary
let blob = this.recorder.getBlob()
// Wrap the Blob in a File so it can be sent as a form-data file field
let files = new File([blob], 'test.mp3', { type: 'audio/mp3', lastModified: Date.now() })
let fd = new FormData()
fd.append('file', files)
fd.append('tenantId', 3) // add extra parameters as needed
// The path returned by the upload interface is used as the voice path (see the uploadFile sketch below)
this.uploadFile(fd)
}
}
}
}
</script>
<style scoped>
</style>
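mouseEnd hands the FormData to this.uploadFile, which is not shown above. A minimal sketch of what it could look like, assuming axios is imported at the top of the script and a hypothetical /api/voice/upload endpoint that returns the saved file's URL (adjust both to your backend):
// Hypothetical upload helper; add it alongside mouseStart/mouseEnd in methods
// assumes: import axios from 'axios' at the top of the <script> block
uploadFile (fd) {
return axios.post('/api/voice/upload', fd, {
headers: { 'Content-Type': 'multipart/form-data' }
}).then(res => {
// use the path returned by the interface as the voice path
this.form.audioUrl = res.data.url
}).catch(err => {
console.error('voice upload failed', err)
})
}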
3. Beyond the comments in the code above, a few things are worth noting:
- When a voice is uploaded, two parameters are saved: the voice path (stored in this.form.audioUrl) and the voice duration. Because the setInterval timer only fires after a one-second delay, one second has to be subtracted from the measured duration; see the first sketch after this list.
- Be sure to handle the very first press separately (the browser asks for microphone permission at that point), otherwise you will get errors.
- A third and very important point: recording worked in my local project, but after packaging and deploying to the test environment the microphone could not be accessed. After many attempts I found the cause: our test environment is served over http://***, and Chrome's security policy only allows microphone access from localhost or HTTPS origins, so switching to HTTPS solved the problem.
- Different browsers may have compatibility issues; if you run into any, you will need to handle them yourself (the second sketch after this list shows one common check).
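For the duration point above, a minimal sketch of the bookkeeping, using the startTime and endTime fields already in the component's data (the helper name is hypothetical):
// Hypothetical helper: compute the recording duration in seconds inside mouseEnd
// subtract one second because the countdown interval only fires after its first tick
getDuration () {
const elapsed = Math.round((this.endTime - this.startTime) / 1000)
return Math.max(elapsed - 1, 0)
}
For the HTTPS and compatibility points, a quick pre-flight check can save debugging time: window.isSecureContext reports whether the page is allowed to use the microphone at all, and navigator.mediaDevices.getUserMedia is the standard promise-based API behind the prefixed versions that recorder.js shims. A sketch:
// Sketch of a support check to run before creating the recorder
function checkRecordingSupport () {
if (!window.isSecureContext) {
console.warn('Microphone access requires HTTPS or localhost')
return false
}
if (!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)) {
console.warn('getUserMedia is not supported in this browser')
return false
}
return true
}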
That covers the hold-to-talk recording; next, voice playback. There are two cases: the backend returns an mp3 (or wav) file, which can be played through an audio element, or it returns an AMR file, which needs special handling.
- Normal playback (MP3, WAV)
These formats need no special conversion. The interface usually returns the URL and the playback duration of the voice, and the file can be played through an audio tag, as in the sketch below.
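A minimal sketch, assuming the interface returns the file address as url:
// Sketch: play an mp3/wav voice file returned by the interface
playVoice (url) {
// option 1: bind it to the audio element in the template
this.form.audioUrl = url
// option 2: play it programmatically without a tag
// let audio = new Audio(url)
// audio.play()
}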
- Play in AMR format
Since the HTML5 audio tag cannot play AMR files, they need to be handled separately, as follows.
Install the package: npm install benz-amr-recorder -S
Then use it as described in the package documentation (import BenzAMRRecorder from 'benz-amr-recorder' at the top of the script):
// Play: playUrl is the button's click handler, url is the address of the AMR file
playUrl (url) {
let amr = new BenzAMRRecorder()
amr.initWithUrl(url).then(function () {
amr.play()
})
}
If you need other operations on AMR files, refer to https://www.npmjs.com/package/benz-amr-recorder
These are things I learned during the project, jotted down while they are still fresh in my memory. If anything is wrong, please point it out.