There are plenty of technical write-ups on the Internet about uploading large files, but most of them skip over one difficulty: they simply split a large file into N pieces and upload all of them at once (what if N is very large?). In practice, the client should cut the file into N pieces but only keep m (m < N) uploads in flight at a time. This post records the key technical points of that approach.
Project requirements
First, the principle: if a 1 GB file is split into 10 parts that are transmitted to the server in parallel, the upload can in theory be up to 10 times faster, provided bandwidth is not the bottleneck.
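In the implementation below the file is not actually cut into 10 equal parts; it is cut into fixed 5 MB slices, with at most 10 uploads in flight at once. A quick sketch of the resulting chunk math for the 1 GB example (the 5 MB slice size and the cap of 10 are the defaults defined in the state below):
// Chunk math for a 1 GB file with the defaults used below
const totalSize = 1024 * 1024 * 1024                // 1 GB
const sliceSize = 1024 * 1024 * 5                   // 5 MB per slice
const chunkCount = Math.ceil(totalSize / sliceSize) // 205 chunks
const concurrent = Math.min(chunkCount, 10)         // 10 requests in flight
console.log(chunkCount, concurrent)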
Attributes that need to be defined initially
return {
  // The uploaded file
  file: {},
  // The collection of chunk (shard) files
  chunks: [],
  // File upload progress
  uploadProgress: 0,
  // Sequence number of the next chunk to upload
  uploadNum: 0,
  // Number of chunks already uploaded
  uploadedNum: 0,
  // Maximum number of concurrent chunk uploads
  maxUploadNums: 10,
  // Size of each file slice (5 MB)
  sliceSize: 1024 * 1024 * 5,
  // MD5 checksum of the file
  fileMD5: ''
}
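For scale: with these defaults, at most maxUploadNums × sliceSize = 10 × 5 MB = 50 MB of file data is in flight at any moment.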
Method of splitting files
function sliceFile (file, piece = 1024 * 1024 * 5) {
  // piece is the size of each slice; the default is 5 MB
  // Total file size
  const totalSize = file.size
  // Start byte of the current slice
  let start = 0
  // End byte (exclusive) of the current slice
  let end = start + piece
  // chunks collects the slice Blobs
  const chunks = []
  while (start < totalSize) {
    const blob = file.slice(start, end)
    chunks.push(blob)
    start = end
    end = start + piece
  }
  return chunks
}
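A quick way to check the splitter is to feed it an in-memory Blob in place of a real File (a Blob exposes the same size and slice members):
// 12 MB of zero bytes stands in for a user-selected file
const fakeFile = new Blob([new Uint8Array(12 * 1024 * 1024)])
const chunks = sliceFile(fakeFile)
console.log(chunks.length)            // 3
console.log(chunks.map(c => c.size))  // [5242880, 5242880, 2097152]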
Generate the file's MD5 (the front end submits the MD5 to the back end, which uses it to verify that the file it assembles after merging the fragments matches what the front end uploaded)
// Spark-MD5 is required
import SparkMD5 from 'spark-md5'

function generateMD5 (file) {
  const spark = new SparkMD5.ArrayBuffer()
  const fileReader = new FileReader()
  let currentChunk = 0
  const chunkSize = this.sliceSize
  const chunksNum = Math.ceil(file.size / chunkSize)
  const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice
  fileReader.onload = (e) => {
    console.log('read chunk nr', currentChunk + 1, 'of', chunksNum)
    // Feed each chunk's bytes into the incremental hash
    spark.append(e.target.result)
    currentChunk++
    if (currentChunk < chunksNum) {
      loadNext()
    } else {
      this.fileMD5 = spark.end()
      console.log(this.fileMD5)
    }
  }
  fileReader.onerror = function () {
    console.warn('oops, something went wrong.')
  }
  function loadNext () {
    const start = currentChunk * chunkSize
    const end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize
    fileReader.readAsArrayBuffer(blobSlice.call(file, start, end))
  }
  loadNext()
}
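How the pieces fit together: a minimal sketch of a change handler for an input[type="file"] element. The handler name is hypothetical, not from the source, and generateMD5 is bound explicitly because it reads this.sliceSize and writes this.fileMD5:
// Hypothetical handler wiring the selected file into the state above
onFileSelected (event) {
  this.file = event.target.files[0]
  this.chunks = sliceFile(this.file, this.sliceSize)
  generateMD5.call(this, this.file) // fills this.fileMD5 asynchronously
  this.startUpload()
}
Two caveats: uploadChunk below relies on this.file.uid, which a raw File object does not have (the original presumably gets it from a UI library's upload component), so with a plain input you would need to assign an id yourself. And since generateMD5 is asynchronous, a production version should make sure the checksum is ready before the merge request fires.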
Start the upload
startUpload () {
  // Open at most maxUploadNums requests to begin with
  const lth = Math.min(this.chunks.length, this.maxUploadNums)
  this.uploadNum = 0
  this.uploadedNum = 0
  for (let i = 0; i < lth; i++) {
    this.uploadChunk({
      serial: this.file.uid,
      chunk: this.uploadNum,
      file: this.chunks[this.uploadNum]
    })
    this.uploadNum++
  }
},
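Note that startUpload only primes the pipeline: for the 1 GB example it fires chunks 0 through 9, and every successful response in uploadChunk below sends the next pending chunk, so roughly maxUploadNums requests stay in flight until all 205 chunks are done.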
Method of uploading a single chunk
async uploadChunk (params = {}) {
  const form = new FormData()
  form.append('serial', params.serial)
  form.append('chunk', params.chunk)
  form.append('file', params.file)
  // await requires the method to be declared async
  const { code } = await uploadFileAPI(form)
  if (code === 200) {
    this.uploadedNum++
    // If chunks remain, immediately send the next one
    if (this.uploadNum < this.chunks.length) {
      this.uploadChunk({
        serial: this.file.uid,
        chunk: this.uploadNum,
        file: this.chunks[this.uploadNum]
      })
      this.uploadNum++
    } else if (this.uploadedNum === this.chunks.length) {
      // All chunks are uploaded; ask the server to merge them
      this.mergeFile({
        id: this.file.uid
      })
    }
  }
},
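The original does not show uploadFileAPI. A minimal sketch, assuming a fetch-based wrapper, a hypothetical /api/upload/chunk endpoint, and the { code: 200 } response shape checked above:
// Hypothetical API wrapper; the endpoint path is an assumption
async function uploadFileAPI (form) {
  const response = await fetch('/api/upload/chunk', {
    method: 'POST',
    body: form // FormData sets the multipart boundary header itself
  })
  return response.json() // expected to resolve to { code: 200, ... }
}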
Merge files
async mergeFile (params = {}) {
  params.md5 = this.fileMD5
  const { code } = await mergeFileAPI(params)
  if (code === 200) {
    // Merge succeeded
  }
}
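mergeFileAPI is likewise not shown in the original. A matching sketch under the same assumptions (hypothetical /api/upload/merge endpoint, { code: 200 } on success):
// Hypothetical merge call; the server stitches the chunks for file `id`
// and compares its own MD5 of the result against `md5`
async function mergeFileAPI (params) {
  const response = await fetch('/api/upload/merge', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(params) // { id, md5 }
  })
  return response.json() // expected { code: 200 } when the merge succeeds
}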