The front end splits the file into chunks. The chunk size is configurable, so you can trade off between fewer, larger chunks (fewer requests) and more, smaller chunks (more requests).
const chunkSize = 2 * 1024 * 1024; // Set the size of each chunk to 2 MB
const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
const hashFile = (file) => {
return new Promise((resolve, reject) => {
const chunks = Math.ceil(file.size / chunkSize);
let currentChunk = 0;
const spark = new SparkMD5.ArrayBuffer();
const fileReader = new FileReader();
function loadNext() {
const start = currentChunk * chunkSize;
const end = start + chunkSize >= file.size ? file.size : start + chunkSize;
fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
}
fileReader.onload = e => {
spark.append(e.target.result); // Append array buffer
currentChunk++;
if (currentChunk < chunks) {
loadNext();
console.log(`Chunk ${currentChunk} parsed, starting chunk ${currentChunk + 1}`);
} else {
console.log('finished loading');
const result = spark.end();
// If result alone were used as the hash, two files with the same content but different
// names would collide and could not both be saved, so the file name is mixed in as well.
const sparkMd5 = new SparkMD5();
sparkMd5.append(result);
sparkMd5.append(file.name);
const hexHash = sparkMd5.end();
resolve(hexHash);
}
};
fileReader.onerror = () => {
console.warn('File read failed!');
};
loadNext();
}).catch(err => {
console.log(err);
});
}
// The code below uses await and ends with `})`, so it presumably runs inside an async
// click handler; the wrapper line (and the '#upload' selector) is an assumption added
// here only to make the snippet complete.
$('#upload').on('click', async () => {
const fileDom = $('#file')[0];
// files is a FileList; it holds several File objects if multiple selection is allowed
const files = fileDom.files;
const file = files[0];
if (!file) {
alert('File not obtained');
return;
}
const blockCount = Math.ceil(file.size / chunkSize); // Total number of fragments
const axiosPromiseArray = []; // array of axios promises
const hash = await hashFile(file); // file hash
// Once the file hash is known, you can verify with the back end if you need resumable uploads:
// ask whether the file has already been uploaded in full and, if not, which chunks it already has.
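// A minimal sketch of such a check (not part of the original code). It assumes a
// hypothetical '/file/check' endpoint that, given the hash, reports whether the file is
// already complete and which chunk indexes the server already holds, e.g.:
//
//   const { data: status } = await axios.get('/file/check', { params: { hash } });
//   if (status.uploaded) return; // the whole file is already on the server
//   const uploadedChunks = new Set(status.chunks); // indexes that could be skipped below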
for (let i = 0; i < blockCount; i++) {
const start = i * chunkSize;
const end = start + chunkSize >= file.size ? file.size : start + chunkSize;
// Build the form
const form = new FormData();
form.append('file', blobSlice.call(file, start, end));
form.append('name', file.name);
form.append('total', blockCount);
form.append('index', i);
form.append('size', file.size);
form.append('hash', hash);
console.log(blockCount, blobSlice.call(file, start, end), i, start, end, file.size);
// Submit the chunk with ajax; the content-type will be multipart/form-data
const axiosOptions = {
onUploadProgress: e => {
// Handle the upload progress
// console.log(blockCount, i, e, file);
}
};
// Add to the Promise array
axiosPromiseArray.push(axios.post('/uploadFile', form, axiosOptions));
}
await axios.all(axiosPromiseArray).then((result) => {
// Merge the chunks
const data = {
size: file.size,
name: file.name,
total: blockCount,
hash
};
const form = new FormData();
form.append('size', file.size);
form.append('name', file.name);
form.append('total', blockCount);
form.append('hash', hash);
console.log(result);
axios.post("/file/chunks", form).then(res= > {
console.log(res)
})
}).catch((err) => {});
console.log("All uploaded.");
})
The back end receives the chunks and writes them into a temporary folder. You can also keep the upload state in Redis (or a similar store) so that interrupted uploads can be resumed.
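As a sketch of that Redis idea only (the server code below keeps everything on disk and does not use Redis), the snippet here records received chunk indexes in a Redis set keyed by the file hash. It assumes a local Redis instance and the go-redis v8 client, neither of which is part of the original code.

package main

import (
	"context"
	"strconv"

	"github.com/go-redis/redis/v8" // assumed dependency
)

var ctx = context.Background()
var rdb = redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local Redis

// markChunkUploaded records that chunk `index` of the file identified by `hash` has arrived.
func markChunkUploaded(hash string, index int) error {
	return rdb.SAdd(ctx, "upload:"+hash, strconv.Itoa(index)).Err()
}

// uploadedChunks returns the chunk indexes already received for `hash`,
// so a resuming client can skip them.
func uploadedChunks(hash string) ([]string, error) {
	return rdb.SMembers(ctx, "upload:"+hash).Result()
}

The uploadFile handler could call markChunkUploaded after saving each chunk, and uploadedChunks could back a check endpoint for the browser. The on-disk implementation used in this post follows.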
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"html/template"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
)

var dir, _ = os.Getwd()
var uploadPath = path.Join(dir, "uploads")
var uploadTempPath = path.Join(uploadPath, "temp")
func sayhello(w http.ResponseWriter, r *http.Request) {
r.ParseForm() // Parses parameters, which are not parsed by default
t, err := template.ParseFiles("static/index.html")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
t.Execute(w, "Zhang")
return
}
// Check whether the folder exists
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func uploadFile(w http.ResponseWriter, r *http.Request) {
file, _, err := r.FormFile("file")
// total := r.PostFormValue("total")
index := r.PostFormValue("index")
// size, err := strconv.ParseInt(r.PostFormValue("size"), 10, 64)
hash := r.PostFormValue("hash")
// name := r.PostFormValue("name")
// List what is already stored in the uploads folder
nameList, err := ioutil.ReadDir(uploadPath)
m := map[string]interface{}{
"code": 46900,
"msg":  "File uploaded",
}
result, _ := json.MarshalIndent(m, "", "")
// Loop over the existing uploads; if this hash is already there, report that the upload is complete
for _, name := range nameList {
tmpName := strings.Split(name.Name(), "_")[0]
if tmpName == hash {
fmt.Fprintf(w, string(result))
return
}
}
chunksPath := path.Join(uploadTempPath, hash, "/")
isPathExists, err := PathExists(chunksPath)
if !isPathExists {
err = os.MkdirAll(chunksPath, os.ModePerm)
}
destFile, err := os.OpenFile(path.Join(chunksPath, hash+"-"+index), syscall.O_CREAT|syscall.O_WRONLY, 0777)
reader := bufio.NewReader(file)
writer := bufio.NewWriter(destFile)
buf := make([]byte, 1024*1024) // 1 MB buffer
for {
n, err := reader.Read(buf)
if err == io.EOF {
writer.Flush()
break
} else if err != nil {
return
} else {
writer.Write(buf[:n])
}
}
defer file.Close()
defer destFile.Close()
if err != nil {
log.Fatalf("%v", err)
}
}
// Merge files
func chunks(w http.ResponseWriter, r *http.Request) {
// total, _ := strconv.Atoi(r.PostFormValue("total"))
// index := r.PostFormValue("index")
size, _ := strconv.ParseInt(r.PostFormValue("size"), 10, 64)
hash := r.PostFormValue("hash")
name := r.PostFormValue("name")
toSize, _ := DirSize(path.Join(uploadTempPath, hash, "/"))
if size != toSize {
fmt.Fprintf(w, "File upload error")
return
}
chunksPath := path.Join(uploadTempPath, hash, "/")
files, _ := ioutil.ReadDir(chunksPath)
fs, _ := os.OpenFile(path.Join(uploadPath, hash+"_"+name), os.O_CREATE|os.O_RDWR|os.O_APPEND, os.ModeAppend|os.ModePerm)
// Merge the chunks in index order; appending them out of order would corrupt the file.
for i := 0; i < len(files); i++ {
chunkPath := path.Join(chunksPath, hash+"-"+strconv.Itoa(i))
data, _ := ioutil.ReadFile(chunkPath)
fs.Write(data)
}
// Remove the temporary chunk directory once the merge is done.
os.RemoveAll(chunksPath)
m := map[string]interface{}{
"code": 20000,
"msg":  "Upload successful",
}
result, _ := json.MarshalIndent(m, "", "")
fmt.Fprintf(w, string(result))
defer fs.Close()
}
// Get the overall folder size
func DirSize(path string) (int64, error) {
var size int64
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if !info.IsDir() {
size += info.Size()
}
return err
})
return size, err
}
func main() {
http.HandleFunc("/", sayhello) // set router
http.HandleFunc("/uploadFile", uploadFile)
http.HandleFunc("/file/chunks", chunks)
err := http.ListenAndServe(":8080", nil) // set listen port
if err != nil {
log.Fatal("Error while starting Go http server on port 8080: ", err) // log the error and exit if the server fails to start
}
}
This is the first time I have written something like this, so it may not be the best solution; please bear with me.