go进行大文件上传

读取前端上传的文件,创建一个临时文件夹保存上传的切片数据。前端上传完成后发送请求到后端合并文件。合并数据时一定要确保文件切片的顺序。

// Working directory of the server process.
// NOTE(review): the error from os.Getwd is discarded — on failure dir is
// empty and uploads land relative to "/"; confirm this is acceptable.
var dir, _ = os.Getwd()

// uploadPath holds fully merged files ("<hash>_<name>");
// uploadTempPath holds per-hash chunk directories while an upload is in flight.
var uploadPath = path.Join(dir, "uploads")
var uploadTempPath = path.Join(uploadPath, "temp")

func sayhello(w http.ResponseWriter, r *http.Request) {
    r.ParseForm() //解析参数,默认是不会解析的
    t, err := template.ParseFiles("static/index.html")
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    t.Execute(w, "张三")
    return
}
func PathExists(path string) (bool, error) {
    _, err := os.Stat(path)
    if err == nil {
        return true, nil
    }
    if os.IsNotExist(err) {
        return false, nil
    }
    return false, err
}
// uploadFile stores a single uploaded chunk under uploads/temp/<hash>/
// as "<hash>-<index>". If a merged file whose name starts with the hash
// already exists, it short-circuits with code 46900 (file already uploaded).
func uploadFile(w http.ResponseWriter, r *http.Request) {
    file, _, err := r.FormFile("file")
    if err != nil {
        // Original dereferenced file without checking err — nil deref on a
        // malformed request.
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    defer file.Close()

    index := r.PostFormValue("index")
    hash := r.PostFormValue("hash")

    // Fast path: merged files are named "<hash>_<name>", so any entry whose
    // prefix matches means this upload already completed.
    nameList, err := ioutil.ReadDir(uploadPath)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    for _, name := range nameList {
        if strings.Split(name.Name(), "_")[0] == hash {
            m := map[string]interface{}{
                "code": 46900,
                "msg":  "文件已上传",
            }
            result, _ := json.MarshalIndent(m, "", "    ")
            w.Write(result)
            return
        }
    }

    chunksPath := path.Join(uploadTempPath, hash)
    if exists, _ := PathExists(chunksPath); !exists {
        if err := os.MkdirAll(chunksPath, os.ModePerm); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
    }

    destFile, err := os.OpenFile(path.Join(chunksPath, hash+"-"+index), os.O_CREATE|os.O_WRONLY, 0777)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer destFile.Close()

    // io.Copy streams with its own buffer; the original's manual loop could
    // return mid-copy before its defers were registered, leaking both files.
    if _, err := io.Copy(destFile, file); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
}
// 合并文件
func chunks(w http.ResponseWriter, r *http.Request) {
    // total, _ := strconv.Atoi(r.PostFormValue("total"))
    // index := r.PostFormValue("index")
    size, _ := strconv.ParseInt(r.PostFormValue("size"), 10, 64)
    hash := r.PostFormValue("hash")
    name := r.PostFormValue("name")

    toSize, _ := DirSize(path.Join(uploadTempPath, hash, "/"))
    if size != toSize {
        fmt.Fprintf(w, "文件上传错误")
    }
    chunksPath := path.Join(uploadTempPath, hash, "/")
    files, _ := ioutil.ReadDir(chunksPath)
    fs, _ := os.OpenFile(path.Join(uploadPath, hash+"_"+name), os.O_CREATE|os.O_RDWR|os.O_APPEND, os.ModeAppend|os.ModePerm)
    var wg sync.WaitGroup
    wg.Add(len(files))
    for i, f := range files {
        go func(f os.FileInfo) {
            name := strings.Split(f.Name(), "-")[0] + "-" + strconv.Itoa(i)
            fileName := path.Join(chunksPath, "/"+name)
            data, _ := ioutil.ReadFile(fileName)
            fs.Write(data)
            os.RemoveAll(path.Join(chunksPath, "/"))
            defer wg.Done()
        }(f)

    }
    wg.Wait()
    m := map[string]interface{}{
        "code": 20000,
        "msg":  "上传成功",
    }
    result, _ := json.MarshalIndent(m, "", "    ")
    fmt.Fprintf(w, string(result))
    defer fs.Close()

}

// 获取整体文件夹大小
func DirSize(path string) (int64, error) {
    var size int64
    err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
        if !info.IsDir() {
            size += info.Size()
        }
        return err
    })
    return size, err
}
// main registers the page, chunk-upload, and merge handlers, then serves
// on port 8080.
func main() {
    routes := map[string]http.HandlerFunc{
        "/":            sayhello,
        "/uploadFile":  uploadFile,
        "/file/chunks": chunks,
    }
    for pattern, handler := range routes {
        http.HandleFunc(pattern, handler)
    }
    // Blocks until the listener fails; log the error and exit.
    if err := http.ListenAndServe(":8080", nil); err != nil {
        log.Fatal("Error while starting GO http server on port - 8080 : ", err)
    }
}

前端利用 HTML5 的 File API 读取文件并进行分片

const chunkSize = 2 * 1024 * 1024; // size of each chunk, set to 2 MB
    // Blob.slice with vendor-prefixed fallbacks for older browsers.
    const blobSlice =
        File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
    // hashFile computes an MD5-based fingerprint of the file by hashing its
    // content chunk by chunk with SparkMD5, then folding the file name into
    // the final digest. Resolves with the hex hash, rejects on read failure.
    const hashFile = (file) => {
        return new Promise((resolve, reject) => {
            const chunks = Math.ceil(file.size / chunkSize);
            let currentChunk = 0;
            const spark = new SparkMD5.ArrayBuffer();
            const fileReader = new FileReader();
            function loadNext() {
                const start = currentChunk * chunkSize;
                const end = start + chunkSize >= file.size ? file.size : start + chunkSize;
                fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
            }
            fileReader.onload = e => {
                spark.append(e.target.result); // Append array buffer
                currentChunk++;
                if (currentChunk < chunks) {
                    loadNext();
                    console.log(`第${currentChunk}分片解析完成,开始解析${currentChunk + 1}分片`);
                } else {
                    console.log('finished loading');
                    const result = spark.end();
                    // The raw content hash alone would collide for two files
                    // with identical content but different names, so mix the
                    // file name into the final digest.
                    const sparkMd5 = new SparkMD5();
                    sparkMd5.append(result);
                    sparkMd5.append(file.name);
                    resolve(sparkMd5.end());
                }
            };
            // BUG FIX: the original only logged read failures, leaving the
            // promise pending forever (callers awaiting it would hang), and
            // an outer .catch turned any rejection into a resolved undefined.
            // Reject instead so the caller can handle the error.
            fileReader.onerror = (err) => {
                console.warn('文件读取失败!');
                reject(err);
            };
            loadNext();
        });
    }


    // NOTE(review): this code is the interior of an async event handler
    // whose opening line is not visible here (it uses `await`/`return`, and
    // the trailing `})` closes it) — confirm against the full page source.

    // Blob.slice is used to split the file into chunks; the exact method
    // name varies between browsers (see blobSlice above).
    const fileDom = $('#file')[0];
    // `files` is a FileList; when multi-select is enabled there can be
    // several entries — only the first is uploaded here.
    const files = fileDom.files;
    const file = files[0];
    if (!file) {
        alert('没有获取文件');
        return;
    }
    const blockCount = Math.ceil(file.size / chunkSize); // total number of chunks
    const axiosPromiseArray = []; // array of pending axios requests
    const hash = await hashFile(file); // content+name hash of the file
    // With the hash available, a resumable-upload flow could ask the backend
    // which chunks were already received and skip re-uploading them.
    for (let i = 0; i < blockCount; i++) {
        const start = i * chunkSize;
        const end = start + chunkSize >= file.size ? file.size : start + chunkSize;
        // Build the multipart form for this chunk.
        const form = new FormData();
        form.append('file', blobSlice.call(file, start, end));
        form.append('name', file.name);
        form.append('total', blockCount);
        form.append('index', i);
        form.append('size', file.size);
        form.append('hash', hash);
        console.log(blockCount, blobSlice.call(file, start, end), i, start, end, file.size);
        // Submit the chunk via ajax; content-type is multipart/form-data.
        const axiosOptions = {
            onUploadProgress: e => {
                // upload-progress hook (currently unused)
                // console.log(blockCount, i, e, file);
            },
        };
        // Collect the request into the promise array.

        axiosPromiseArray.push(axios.post('/uploadFile', form, axiosOptions));
    }
    // Wait for every chunk request to finish, then ask the server to merge.
    await axios.all(axiosPromiseArray).then((result) => {
        // Request the chunk merge. NOTE(review): `data` below is built but
        // never sent — only `form` is posted; confirm whether it can go.
        const data = {
            size: file.size,
            name: file.name,
            total: blockCount,
            hash
        };
        const form = new FormData();
        form.append('size', file.size);
        form.append('name', file.name);
        form.append('total', blockCount);
        form.append('hash', hash);
        console.log(result);
        axios.post("/file/chunks", form).then(res => {
            console.log(res)
        })
    }).catch((err) => {

    });
    console.log("全部上传完毕");
})

标签: none

添加新评论