Backend Development 16 min read

Implementing Small and Large File Uploads with Chunking, Resume, and Instant Transfer Using Spring Boot and Vanilla JavaScript

This article demonstrates how to build a complete file upload solution in Java and JavaScript, covering simple small-file uploads, large-file chunked uploads, breakpoint resume, and instant upload, with detailed backend Spring Boot 3.1.2 code and plain JavaScript front‑end implementation.

Java Architect Essentials
Java Architect Essentials
Java Architect Essentials
Implementing Small and Large File Uploads with Chunking, Resume, and Instant Transfer Using Spring Boot and Vanilla JavaScript

File upload is a common requirement in web projects, but simple form uploads become unreliable for large files (1 GB+) or slow networks, leading to failed uploads and poor user experience.

For small files the demo uses Spring Boot 3.1.2 (JDK 17) on the backend and native JavaScript with spark‑md5.min.js on the front end. The Maven pom.xml configuration is:

<!-- Inherit dependency and plugin management from Spring Boot 3.1.2. -->
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>3.1.2</version>
</parent>
<!-- Spring Boot 3.x requires Java 17 or newer. -->
<properties>
    <java.version>17</java.version>
</properties>
<dependencies>
    <!-- Web starter: embedded Tomcat + Spring MVC, including multipart support. -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
</dependencies>
<build>
    <plugins>
        <!-- Repackages the jar as an executable Spring Boot fat jar. -->
        <plugin>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-maven-plugin</artifactId>
        </plugin>
    </plugins>
</build>

The controller handling a single file upload is:

/**
 * Handles single small-file uploads: stores each file under UPLOAD_PATH
 * with a UUID-prefixed name and returns the stored absolute path.
 */
@RestController
public class UploadController {
    /** Root directory where uploaded files are written. */
    public static final String UPLOAD_PATH = "D:\\upload\\";

    /**
     * Saves one multipart file to disk.
     *
     * @param file the uploaded multipart file
     * @return JSON map containing the stored file's absolute path
     * @throws IOException if the file cannot be written
     */
    @RequestMapping("/upload")
    public ResponseEntity<Map<String, String>> upload(@RequestParam MultipartFile file) throws IOException {
        // Ensure the target directory exists up front: transferTo() throws
        // FileNotFoundException when the destination directory is missing.
        File uploadDir = new File(UPLOAD_PATH);
        if (!uploadDir.exists()) {
            uploadDir.mkdirs();
        }
        // Prefix with a random UUID so concurrent uploads of files with the
        // same original name never overwrite each other.
        File dstFile = new File(UPLOAD_PATH, String.format("%s.%s", UUID.randomUUID(), StringUtils.getFilename(file.getOriginalFilename())));
        file.transferTo(dstFile);
        return ResponseEntity.ok(Map.of("path", dstFile.getAbsolutePath()));
    }
}

The front‑end HTML page uses a simple form and JavaScript to show upload progress:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>upload</title>
</head>
<body>
    upload
    <form enctype="multipart/form-data">
        <input type="file" name="fileInput" id="fileInput">
        <input type="button" value="上传" onclick="uploadFile()">
    </form>
    上传结果 <span id="uploadResult"></span>
    <script>
        var uploadResult = document.getElementById("uploadResult");
        // Uploads the selected file via XHR and shows progress / result.
        function uploadFile() {
            var file = document.getElementById('fileInput').files[0];
            if (!file) return;
            var xhr = new XMLHttpRequest();
            xhr.upload.onprogress = function (e) {
                // e.total is only meaningful when the browser can compute it;
                // without this guard the percent would be NaN.
                if (!e.lengthComputable) return;
                // Round to avoid long decimal tails like 33.333333333333336%.
                var percent = Math.round(100 * e.loaded / e.total);
                uploadResult.innerHTML = '上传进度:' + percent + '%';
            };
            xhr.onload = function () {
                if (xhr.status === 200) {
                    uploadResult.innerHTML = '上传成功' + xhr.responseText;
                } else {
                    // Previously non-200 responses were silently ignored.
                    uploadResult.innerHTML = '上传失败';
                }
            };
            xhr.onerror = function () { uploadResult.innerHTML = '上传失败'; };
            xhr.open('POST', '/upload', true);
            var formData = new FormData();
            formData.append('file', file);
            xhr.send(formData);
        }
    </script>
</body>
</html>

For large files the demo switches to a chunked upload strategy. The front‑end first calculates the file's MD5 using spark-md5, then slices the file into 1 MiB chunks and uploads each chunk with additional parameters (chunkSize, chunkNumber, totalNumber, md5). The core JavaScript functions are:

// Size of each upload slice; every chunk except possibly the last is 1 MiB.
var chunkSize = 1 * 1024 * 1024; // 1 MiB
// DOM targets for displaying the computed MD5 and per-chunk upload results.
var fileMd5Span = document.getElementById('fileMd5');
var uploadResult = document.getElementById('uploadResult');
// MD5 of the currently selected file; populated by calculateFileMD5().
var fileMd5;
// Computes the MD5 of the selected file, caches it in the module-level
// `fileMd5`, and renders it into the page.
function calculateFileMD5() {
    var selected = document.getElementById('fileInput').files[0];
    getFileMd5(selected).then(function (digest) {
        fileMd5 = digest;
        fileMd5Span.innerHTML = digest;
    });
}
// Slices the selected file into chunks and POSTs every chunk to the
// backend. Requires calculateFileMD5() to have run first so that the
// md5 parameter accompanies each chunk.
function uploadFile() {
    var selected = document.getElementById('fileInput').files[0];
    if (!selected || !fileMd5) return;
    var pieces = sliceFile(selected);
    var originalName = selected.name;
    for (var idx = 0; idx < pieces.length; idx++) {
        var form = new FormData();
        form.append('totalNumber', pieces.length);
        form.append('chunkSize', chunkSize);
        form.append('chunkNumber', idx);
        form.append('md5', fileMd5);
        // Wrap each Blob slice in a File so the original name is preserved.
        form.append('file', new File([pieces[idx]], originalName));
        upload(form);
    }
}
/**
 * Computes the MD5 digest of a File/Blob using spark-md5.
 *
 * @param {File|Blob} file - the file whose contents are hashed.
 * @returns {Promise<string>} resolves with the hex MD5 digest; rejects
 *     if the read fails. (The original version had no onerror handler,
 *     so a failed read left the Promise pending forever.)
 */
function getFileMd5(file) {
    return new Promise((resolve, reject) => {
        var reader = new FileReader();
        reader.onload = function (e) { resolve(SparkMD5.ArrayBuffer.hash(e.target.result)); };
        // Settle the Promise on I/O failure instead of hanging callers.
        reader.onerror = function () { reject(reader.error); };
        reader.readAsArrayBuffer(file);
    });
}
/**
 * Cuts a file into chunkSize-sized Blob slices.
 *
 * @param {File|Blob} file - source to slice.
 * @returns {Blob[]} ordered slices; the final slice may be shorter.
 */
function sliceFile(file) {
    const total = file.size;
    const pieces = [];
    for (let offset = 0; offset < total; offset += chunkSize) {
        pieces.push(file.slice(offset, Math.min(offset + chunkSize, total)));
    }
    return pieces;
}
// POSTs one chunk's FormData to /uploadBig and appends the outcome
// for that chunk number to the result area.
function upload(data) {
    var request = new XMLHttpRequest();
    request.onload = function () {
        if (request.status === 200) {
            uploadResult.append('上传成功分片:' + data.get('chunkNumber') + '\t');
        }
    };
    request.onerror = function () { uploadResult.innerHTML = '上传失败'; };
    request.open('POST', '/uploadBig', true);
    request.send(data);
}
// Asks the server whether every chunk for the cached fileMd5 has been
// received and displays the integrity-check response.
function checkFile() {
    var request = new XMLHttpRequest();
    request.onload = function () {
        if (request.status === 200) {
            document.getElementById('checkFileRes').innerHTML = '检测文件完整性成功:' + request.responseText;
        }
    };
    request.onerror = function () {
        document.getElementById('checkFileRes').innerHTML = '检测文件完整性失败';
    };
    request.open('POST', '/checkFile', true);
    var payload = new FormData();
    payload.append('md5', fileMd5);
    request.send(payload);
}

The backend provides two endpoints: /uploadBig stores each chunk using RandomAccessFile (or optionally MappedByteBuffer ) and records the upload status in a .conf file; /checkFile reads the status file, verifies whether all chunks are uploaded, and if so computes the final file MD5 to confirm integrity.

/**
 * Chunked-upload controller: /uploadBig writes each chunk into the final
 * file at its byte offset and flags it in a per-file .conf status file;
 * /checkFile reports chunk status and, once complete, verifies the
 * assembled file's MD5.
 */
@RestController
public class UploadController {
    /** Root directory where uploaded files are written. */
    public static final String UPLOAD_PATH = "D:\\upload\\";

    /**
     * Stores one chunk of a large file.
     *
     * @param chunkSize   size in bytes of every full chunk (used for the write offset)
     * @param totalNumber total number of chunks making up the file
     * @param chunkNumber zero-based index of this chunk
     * @param md5         MD5 of the complete file; doubles as the storage directory name
     * @param file        the chunk payload
     * @return JSON map with the destination file path
     * @throws IOException if the chunk or status byte cannot be written
     */
    @RequestMapping("/uploadBig")
    public ResponseEntity<Map<String, String>> uploadBig(@RequestParam Long chunkSize,
                                                    @RequestParam Integer totalNumber,
                                                    @RequestParam Long chunkNumber,
                                                    @RequestParam String md5,
                                                    @RequestParam MultipartFile file) throws IOException {
        String dstFile = String.format("%s\\%s\\%s.%s", UPLOAD_PATH, md5, md5, StringUtils.getFilenameExtension(file.getOriginalFilename()));
        String confFile = String.format("%s\\%s\\%s.conf", UPLOAD_PATH, md5, md5);
        File dir = new File(dstFile).getParentFile();
        if (!dir.exists()) {
            // mkdirs(), not mkdir(): also creates UPLOAD_PATH itself when it
            // is missing (mkdir() would fail silently and the .conf write
            // below would then throw NoSuchFileException).
            dir.mkdirs();
            // One status byte per chunk, initialised to 0 (= not uploaded).
            byte[] bytes = new byte[totalNumber];
            Files.write(Path.of(confFile), bytes);
        }
        try (RandomAccessFile raf = new RandomAccessFile(dstFile, "rw");
             RandomAccessFile rafConf = new RandomAccessFile(confFile, "rw");
             InputStream is = file.getInputStream()) {
            // Each chunk is written at its absolute offset, so chunks may
            // arrive in any order and may be re-sent on resume.
            raf.seek(chunkNumber * chunkSize);
            raf.write(is.readAllBytes());
            // Mark this chunk as received: byte value 1 at index chunkNumber.
            rafConf.seek(chunkNumber);
            rafConf.write(1);
        }
        return ResponseEntity.ok(Map.of("path", dstFile));
    }

    /**
     * Reports upload status for the file identified by md5.
     * Returns the stored path when all chunks are present and the MD5
     * matches, a failure message when it does not, or the per-chunk
     * status string ("0"/"1" per chunk) when chunks are still missing.
     *
     * @param md5 MD5 of the complete file
     * @return JSON map: "path", "msg", or "chucks" (status string)
     * @throws Exception if the status file or stored file cannot be read
     */
    @RequestMapping("/checkFile")
    public ResponseEntity<Map<String, String>> checkFile(@RequestParam String md5) throws Exception {
        String confPath = String.format("%s\\%s\\%s.conf", UPLOAD_PATH, md5, md5);
        Path path = Path.of(confPath);
        // Test the status file itself, not just its parent directory: a
        // directory without a .conf would make readAllBytes() throw below.
        if (!Files.exists(path)) {
            return ResponseEntity.ok(Map.of("msg", "文件未上传"));
        }
        byte[] status = Files.readAllBytes(path);
        StringBuilder sb = new StringBuilder();
        for (byte b : status) sb.append(b);
        if (!sb.toString().contains("0")) {
            // All chunks uploaded; verify the assembled file's MD5.
            File dir = new File(String.format("%s\\%s", UPLOAD_PATH, md5));
            File[] entries = dir.listFiles();
            if (entries != null) { // listFiles() returns null on I/O error
                for (File f : entries) {
                    if (!f.getName().endsWith(".conf")) {
                        try (InputStream is = new FileInputStream(f)) {
                            String md5calc = DigestUtils.md5DigestAsHex(is);
                            if (!md5calc.equalsIgnoreCase(md5)) {
                                return ResponseEntity.ok(Map.of("msg", "文件上传失败"));
                            }
                            return ResponseEntity.ok(Map.of("path", f.getAbsolutePath()));
                        }
                    }
                }
            }
        }
        // NOTE(review): key is historically spelled "chucks" (sic); kept
        // unchanged because the front-end resume logic reads this key.
        return ResponseEntity.ok(Map.of("chucks", sb.toString()));
    }
}

Configuration notes: increase spring.servlet.multipart.max-file-size and spring.servlet.multipart.max-request-size (e.g., to 1024 MB) in application.properties , and adjust client_max_body_size in Nginx if a 413 error occurs.

Resume upload (断点续传) works by calling /checkFile to obtain the status string; any chunk marked with 0 is re‑uploaded using the same chunked upload flow until all chunks succeed.

Instant upload (秒传) leverages the MD5 hash: if a file with the same MD5 already exists on the server, the /checkFile endpoint returns the existing file path, allowing the client to skip uploading the data entirely.

In summary, the article provides a complete, production‑ready implementation of small‑file upload, large‑file chunked upload, breakpoint resume, and instant upload, with all necessary backend Spring Boot code and plain JavaScript front‑end logic.

JavaScript · Spring Boot · File Upload · Chunked Upload · Resume Upload · Instant Upload
Java Architect Essentials
Written by

Java Architect Essentials

Committed to sharing quality articles and tutorials to help Java programmers progress from junior to mid-level to senior architect. We curate high-quality learning resources, interview questions, videos, and projects from across the internet to help you systematically improve your Java architecture skills. Follow and reply '1024' to get Java programming resources. Learn together, grow together.

0 followers
Reader feedback

How this landed with the community

login Sign in to like

Rate this article

Was this worth your time?

Sign in to rate
Discussion

0 Comments

Thoughtful readers leave field notes, pushback, and hard-won operational detail here.