SpringBoot文件分片上传
大文件上传耗时过长,大文件计算 MD5 也较慢,因此需要分片上传、秒传与断点续传。后端技术:Spring Boot + MyBatis + MySQL。前端有两种实现,均使用 spark-md5.js:1、Vue 使用插件 vue-simple-uploader;2、jQuery 原生实现,只有一个页面,方便学习。后端服务只有一个,接口通用;文中前端以 jQuery 版为示例,方便大家参考。涉及内容:文件上传、分片上传、秒传、断点续传。
项目场景:
将大文件分割成小的片段,然后通过多个请求并行上传这些片段,最终在服务器端将这些片段合并还原为完整的文件。这种方式有助于规避一些上传过程中的问题,如网络不稳定、上传中断等,并能提高上传速度。
解决方案:
后端技术:Spring Boot+MyBatis+MySql
前端分为两种:都是使用spark-md5.js
1、vue使用插件vue-simple-uploader
2、jquery前端项目只有一个页面,使用原生的方式,方便学习
后端服务只有一个,接口是通用的,文章里前端我以jquery的为示例,方便大家参考。源码里面两种方式都包括了:https://download.csdn.net/download/u011974797/86862270
1、文件上传
小文件(图片、文档、视频)上传可以直接使用很多ui框架封装的上传组件,或者自己写一个input 上传,利用FormData 对象提交文件数据,后端使用spring提供的MultipartFile进行文件的接收,然后写入即可。但是对于比较大的文件,比如上传2G左右的文件(http上传),就需要将文件分片上传(file.slice()),否则中间http长时间连接可能会断掉。
2、分片上传
分片上传,就是将所要上传的文件,按照一定的大小,将整个文件分隔成多个数据块(我们称之为Part)来进行分别上传,上传完之后再由服务端对所有上传的文件进行汇总整合成原始的文件。
3、秒传
通俗地说:上传前先用 spark-md5.js 计算文件的 MD5,服务器会先做 MD5 校验;如果服务器上已存在内容相同的文件,就直接返回该文件的地址——你访问到的其实是服务器上的同一个文件,这就是秒传。想让文件不走秒传,只要让 MD5 改变即可,也就是对文件内容本身做修改(只改文件名无效)。例如一个文本文件,多加几个字,MD5 就变了,也就不会触发秒传了。
4、断点续传
断点续传是在下载或上传时,将下载或上传任务(一个文件或一个压缩包)人为的划分为几个部分,每一个部分采用一个线程进行上传或下载,如果碰到网络故障,可以从已经上传或下载的部分开始继续上传或者下载未完成的部分,而没有必要从头开始上传或者下载。本文的断点续传主要是针对断点上传场景。
源码:
关键代码就是Controller和Service,entity和返回结果Result可以按自己的框架来
Controller
package com.test.controller;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.FileInputStream;
import java.io.OutputStream;
import java.net.URLEncoder;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.test.entity.ChunkEntity;
import com.test.entity.FileListEntity;
import com.test.query.FileQuery;
import com.test.service.FileService;
import com.test.utils.Result;
import com.test.vo.UploadResult;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import lombok.extern.slf4j.Slf4j;
@RestController
@RequestMapping("/file")
@Api(tags = { "测试文件上传" })
@Slf4j
public class TestFileController {
@Value("${testfile.filePath}")
private String filePath;
@Autowired
private FileService fileService;
/**
* 上传文件块
* vue-simple-uploader会调用post方法上传
*/
@ApiOperation(value = "上传文件块")
@RequestMapping(value = "/upload", method = RequestMethod.POST)
public Result<?> uploadChunk(ChunkEntity chunkEntity) {
log.info("文件名: {}, chunkNumber: {}", chunkEntity.getFilename(), chunkEntity.getChunkNumber());
boolean flag = fileService.uploadChunk(chunkEntity);
if(flag){
return Result.ok();
}
return Result.error();
}
/**
* 检查文件块
* vue-simple-uploader会调用get方法验证
*/
@ApiOperation(value = "检查文件块")
@RequestMapping(value = "/upload", method = RequestMethod.GET)
public Result<?> checkChunk(ChunkEntity chunkEntity) {
UploadResult result = fileService.checkChunk(chunkEntity);
return Result.ok(result);
}
/**
* 合并文件
*/
@ApiOperation(value = "合并文件")
@RequestMapping(value = "/merge", method = RequestMethod.POST)
public Result<?> merge(@RequestBody FileListEntity fileInfo) {
boolean flag = fileService.merge(fileInfo);
if(flag){
return Result.ok();
}
return Result.error();
}
/**
* 查询列表
*/
@ApiOperation(value = "查询列表")
@RequestMapping(value = "/selectFileList", method = RequestMethod.POST)
public Result<?> selectFileList(@RequestBody FileQuery fileQuery) {
IPage<FileListEntity> ipage = fileService.selectFileList(fileQuery);
return Result.ok(ipage);
}
}
Service
package com.test.service;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.CollectionUtils;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.test.entity.ChunkEntity;
import com.test.entity.FileListEntity;
import com.test.mapper.ChunkDao;
import com.test.mapper.FileListDao;
import com.test.query.FileQuery;
import com.test.vo.UploadResult;
import lombok.extern.slf4j.Slf4j;
@Service
@Slf4j
public class FileService extends ServiceImpl<ChunkDao, ChunkEntity> {
@Value("${testfile.filePath}")
private String filePath;
@Autowired
private FileListDao fileListDao;
/**
* 上传文件块
* @param chunkEntity 文件块
* @return true成功
*/
public boolean uploadChunk(ChunkEntity chunkEntity) {
Path path = Paths.get(generatePath(filePath, chunkEntity));
try {
Files.write(path, chunkEntity.getUpfile().getBytes());
log.debug("文件 {} 写入成功, md5:{}", chunkEntity.getFilename(), chunkEntity.getIdentifier());
//写入数据库
this.save(chunkEntity);
} catch (IOException e) {
log.error("上传文件块失败: "+e);
return false;
}
return true;
}
/**
* 检查文件块
* @param chunkEntity 文件块
* @return
*/
public UploadResult checkChunk(ChunkEntity chunkEntity) {
UploadResult result = new UploadResult();
//查询本地磁盘和数据库记录选一种方式
/*
//直接查询本地磁盘
String file = filePath + "/" + chunkEntity.getIdentifier() + "/" + chunkEntity.getFilename();
//先判断整个文件是否已经上传过了,如果是,则告诉前端跳过上传,实现秒传
if(fileExists(file)) {
result.setSkipUpload(true);
return result;
}
*/
//查询数据库记录
//先判断整个文件是否已经上传过了,如果是,则告诉前端跳过上传,实现秒传
QueryWrapper<FileListEntity> fileWrapper = new QueryWrapper<FileListEntity>();
fileWrapper.lambda().eq(FileListEntity::getDelFlag, 0);
fileWrapper.lambda().eq(FileListEntity::getIdentifier, chunkEntity.getIdentifier());
FileListEntity fileListEntity = fileListDao.selectOne(fileWrapper);
if (fileListEntity != null) {
result.setSkipUpload(true);
result.setFileId(fileListEntity.getId());
return result;
}
//如果完整文件不存在,则去数据库判断当前哪些文件块已经上传过了,把结果告诉前端,跳过这些文件块的上传,实现断点续传
QueryWrapper<ChunkEntity> chunkWrapper = new QueryWrapper<ChunkEntity>();
chunkWrapper.lambda().eq(ChunkEntity::getIdentifier, chunkEntity.getIdentifier());
List<ChunkEntity> chunkList = this.list(chunkWrapper);
//将已存在的块的chunkNumber列表返回给前端,前端会规避掉这些块
if (!CollectionUtils.isEmpty(chunkList)) {
List<Integer> collect = chunkList.stream().map(ChunkEntity::getChunkNumber).collect(Collectors.toList());
result.setUploadedChunks(collect);
}
return result;
}
@Transactional(rollbackFor = Exception.class)
public boolean merge(FileListEntity fileInfo) {
String filename = fileInfo.getFilename();
String file = filePath + "/" + fileInfo.getIdentifier() + "/" + filename;
String folder = filePath + "/" + fileInfo.getIdentifier();
boolean flag = mergeFile(file, folder, filename);
if(!flag){
return false;
}
//当前文件已存在数据库中时,返回已存在标识
QueryWrapper<FileListEntity> fileWrapper = new QueryWrapper<FileListEntity>();
fileWrapper.lambda().eq(FileListEntity::getDelFlag, 0);
fileWrapper.lambda().eq(FileListEntity::getIdentifier, fileInfo.getIdentifier());
Integer count = fileListDao.selectCount(fileWrapper);
if (count <= 0) {
fileInfo.setLocation(file);
fileListDao.insert(fileInfo);
}
//插入文件记录成功后,删除chunk表中的对应记录,释放空间
QueryWrapper<ChunkEntity> chunkWrapper = new QueryWrapper<ChunkEntity>();
chunkWrapper.lambda().eq(ChunkEntity::getIdentifier, fileInfo.getIdentifier());
this.remove(chunkWrapper);
return true;
}
/**
* 查看应用列表
*/
public IPage<FileListEntity> selectFileList(FileQuery fileQuery) {
QueryWrapper<FileListEntity> qw = new QueryWrapper<>();
qw.lambda().eq(FileListEntity::getDelFlag, 0);
qw.lambda().like(StringUtils.isNotBlank(fileQuery.getName()), FileListEntity::getFilename, fileQuery.getName());
qw.lambda().orderByDesc(FileListEntity::getCreateTime);
IPage<FileListEntity> iPage = fileListDao.selectPage(new Page<>(fileQuery.getPage(), fileQuery.getLimit()), qw);
return iPage;
}
/**
* 功能描述:生成块文件所在地址
*/
private String generatePath(String uploadFolder, ChunkEntity chunk) {
StringBuilder sb = new StringBuilder();
//文件夹地址/md5
sb.append(uploadFolder).append("/").append(chunk.getIdentifier());
//判断uploadFolder/identifier 路径是否存在,不存在则创建
if (!Files.isWritable(Paths.get(sb.toString()))) {
log.info("path not exist,create path: {}", sb.toString());
try {
Files.createDirectories(Paths.get(sb.toString()));
} catch (IOException e) {
log.error(e.getMessage(), e);
}
}
//文件夹地址/md5/文件名-1
return sb.append("/")
.append(chunk.getFilename())
.append("-")
.append(chunk.getChunkNumber()).toString();
}
/**
* 文件合并
*
* @param targetFile 要形成的文件名
* @param folder 要形成的文件夹地址
* @param filename 文件的名称
*/
private boolean mergeFile(String targetFile, String folder, String filename) {
try {
//先判断文件是否存在
if(fileExists(targetFile)) {
//文件已存在
return true;
}
Files.createFile(Paths.get(targetFile));
Files.list(Paths.get(folder))
.filter(path -> !path.getFileName().toString().equals(filename))
.sorted((o1, o2) -> {
String p1 = o1.getFileName().toString();
String p2 = o2.getFileName().toString();
int i1 = p1.lastIndexOf("-");
int i2 = p2.lastIndexOf("-");
return Integer.valueOf(p2.substring(i2)).compareTo(Integer.valueOf(p1.substring(i1)));
})
.forEach(path -> {
try {
//以追加的形式写入文件
Files.write(Paths.get(targetFile), Files.readAllBytes(path), StandardOpenOption.APPEND);
//合并后删除该块
Files.delete(path);
} catch (IOException e) {
log.error(e.getMessage(), e);
}
});
} catch (IOException e) {
log.error("文件合并失败: ", e);
return false;
}
return true;
}
/**
* 根据文件的全路径名判断文件是否存在
* @param file
* @return
*/
private boolean fileExists(String file) {
boolean fileExists = false;
Path path = Paths.get(file);
fileExists = Files.exists(path,new LinkOption[]{ LinkOption.NOFOLLOW_LINKS});
return fileExists;
}
}
entity
package com.test.entity;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
@TableName("t_chunk")
@Data
@ApiModel(value = "文件块", description = "文件块")
public class ChunkEntity {
    /**
     * Primary key ID.
     */
    @TableId
    @ApiModelProperty(value = "主键ID")
    private String id;
    /**
     * Chunk number, 1-based.
     */
    @ApiModelProperty(value = "文件块编号,从1开始")
    private Integer chunkNumber;
    /**
     * Nominal size of each chunk in bytes.
     */
    @ApiModelProperty(value = "每块大小")
    private Long chunkSize;
    /**
     * Actual size of this chunk in bytes (the last chunk may be smaller).
     */
    @ApiModelProperty(value = "当前分块大小")
    private Long currentChunkSize;
    /**
     * Total size of the whole file in bytes.
     */
    @ApiModelProperty(value = "总大小")
    private Long totalSize;
    /**
     * MD5 of the whole file; used as the unique file identifier.
     */
    @ApiModelProperty(value = "文件标识MD5")
    private String identifier;
    /**
     * Original file name.
     */
    @ApiModelProperty(value = "文件名")
    private String filename;
    /**
     * Relative path reported by the uploader.
     */
    @ApiModelProperty(value = "相对路径")
    private String relativePath;
    /**
     * Total number of chunks for this file.
     */
    @ApiModelProperty(value = "总块数")
    private Integer totalChunks;
    /**
     * Binary chunk payload from the multipart request; not persisted
     * (TableField(exist = false) excludes it from the t_chunk mapping).
     */
    @TableField(exist = false)
    @ApiModelProperty(value = "块内容")
    private MultipartFile upfile;
}
package com.test.entity;
import java.util.Date;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import com.fasterxml.jackson.annotation.JsonFormat;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
@TableName("t_file_list")
@Data
@ApiModel(value = "文件对象", description = "文件对象")
public class FileListEntity {

    // Byte-size thresholds for the human-readable size string.
    private static final long KB = 1024L;
    private static final long MB = KB * 1024;
    private static final long GB = MB * 1024;

    /**
     * Primary key ID.
     */
    @TableId
    @ApiModelProperty(value = "主键ID")
    private String id;
    /**
     * Original file name.
     */
    @ApiModelProperty(value = "文件名")
    private String filename;
    /**
     * MD5 of the whole file; used as the unique file identifier.
     */
    @ApiModelProperty(value = "文件标识MD5")
    private String identifier;
    /**
     * Total size in bytes.
     */
    @ApiModelProperty(value = "总大小")
    private Long totalSize;
    /**
     * Absolute path of the merged file on disk.
     */
    @ApiModelProperty(value = "地址")
    private String location;
    /**
     * Soft-delete flag: 0 = live, 1 = deleted.
     */
    @ApiModelProperty(value = "是否删除: 0.否 1.是")
    private Integer delFlag;
    /**
     * Creation time.
     */
    @ApiModelProperty(value = "创建时间", hidden = true)
    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
    private Date createTime;
    /**
     * Human-readable size ("1.50MB"); derived, not persisted.
     */
    @TableField(exist = false)
    @ApiModelProperty(value = "文件大小带单位")
    private String totalSizeName;

    /**
     * Stores the raw byte count and derives {@link #totalSizeName} with a
     * unit suffix (B/KB/MB/GB, two decimals above 1KB).
     *
     * Fixes vs. the previous version: a null size no longer throws NPE, and
     * the magic int-literal products are named long constants.
     *
     * @param totalSize size in bytes, may be null
     */
    public void setTotalSize(Long totalSize) {
        this.totalSize = totalSize;
        if (totalSize == null) {
            this.totalSizeName = null;
            return;
        }
        double bytes = totalSize.doubleValue();
        if (totalSize >= GB) {
            this.totalSizeName = String.format("%.2f", bytes / GB) + "GB";
        } else if (totalSize >= MB) {
            this.totalSizeName = String.format("%.2f", bytes / MB) + "MB";
        } else if (totalSize >= KB) {
            this.totalSizeName = String.format("%.2f", bytes / KB) + "KB";
        } else {
            this.totalSizeName = totalSize.toString() + "B";
        }
    }
}
返回接口封装Result
package com.test.utils;
import java.io.Serializable;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
/**
* 接口返回数据格式
*
*/
/**
 * Standard envelope returned by every REST endpoint: a success flag, a
 * code/message pair taken from {@code CodeMsg}, an optional payload and a
 * creation timestamp.
 *
 * @param <T> nominal payload type
 */
@Data
@ApiModel(value = "接口返回对象", description = "接口返回对象")
public class Result<T> implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Success flag; defaults to true. */
    @ApiModelProperty(value = "成功标志")
    private boolean success = true;

    /** Human-readable message; defaults to the SUCCESS message. */
    @ApiModelProperty(value = "返回处理消息")
    private String msg = CodeMsg.SUCCESS.getMsg();

    /** Business status code; defaults to the SUCCESS code. */
    @ApiModelProperty(value = "返回代码")
    private Integer code = CodeMsg.SUCCESS.getCode();

    /** Payload, if any. */
    @ApiModelProperty(value = "返回数据对象")
    private Object data;

    /** Creation timestamp (epoch millis). */
    @ApiModelProperty(value = "时间戳")
    private long timestamp = System.currentTimeMillis();

    public Result() {
    }

    /** Marks this instance successful with the given message and returns it. */
    public Result<T> success(String msg) {
        this.msg = msg;
        this.code = CodeMsg.SUCCESS.getCode();
        this.success = true;
        return this;
    }

    /** Shared factory: the single place that sets the three status fields. */
    private static Result<Object> build(boolean success, Integer code, String msg) {
        Result<Object> result = new Result<Object>();
        result.setSuccess(success);
        result.setCode(code);
        result.setMsg(msg);
        return result;
    }

    public static Result<Object> ok() {
        return build(true, CodeMsg.SUCCESS.getCode(), CodeMsg.SUCCESS.getMsg());
    }

    public static Result<Object> ok(String msg) {
        return build(true, CodeMsg.SUCCESS.getCode(), msg);
    }

    public static Result<Object> ok(Object data) {
        Result<Object> result = build(true, CodeMsg.SUCCESS.getCode(), CodeMsg.SUCCESS.getMsg());
        result.setData(data);
        return result;
    }

    public static Result<Object> error() {
        return build(false, CodeMsg.SYSTEM_ERROR.getCode(), CodeMsg.SYSTEM_ERROR.getMsg());
    }

    public static Result<Object> error(String msg) {
        return error(CodeMsg.SYSTEM_ERROR.getCode(), msg);
    }

    public static Result<Object> error(int code, String msg) {
        return build(false, code, msg);
    }
}
表结构
-- Chunk bookkeeping table: one row per uploaded chunk of a file, keyed by the
-- file's MD5 (identifier). Rows are removed once the file has been merged.
DROP TABLE IF EXISTS `t_chunk`;
CREATE TABLE `t_chunk` (
`id` varchar(64) NOT NULL COMMENT '主键ID',
`chunk_number` int(11) NOT NULL COMMENT '文件块编号,从1开始',
`chunk_size` bigint(20) NOT NULL COMMENT '分块大小',
`current_chunk_size` bigint(20) NOT NULL COMMENT '当前分块大小',
`identifier` varchar(64) NOT NULL COMMENT '文件标识MD5',
`filename` varchar(500) NOT NULL COMMENT '文件名',
`relative_path` varchar(500) NOT NULL COMMENT '相对路径',
`total_chunks` int(11) NOT NULL COMMENT '总块数',
`total_size` bigint(20) NOT NULL COMMENT '总大小',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='文件块';
-- Completed-upload registry: one row per fully merged file; `identifier`
-- (the file MD5) is what enables instant upload ("秒传") detection.
DROP TABLE IF EXISTS `t_file_list`;
CREATE TABLE `t_file_list` (
`id` varchar(64) NOT NULL COMMENT '主键ID',
`filename` varchar(500) NOT NULL COMMENT '文件名',
`identifier` varchar(64) NOT NULL COMMENT '唯一标识MD5',
`total_size` bigint(20) NOT NULL COMMENT '文件总大小',
`location` varchar(200) NOT NULL COMMENT '地址',
`del_flag` int(1) NOT NULL DEFAULT '0' COMMENT '是否删除: 0.否 1.是',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='文件列表';
前端代码
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>大文件分片上传示例</title>
<script src="jquery.min.js"></script>
<script src="spark-md5.js"></script>
<script>
//vari = -1;
var chunkSize = 5 * 1024 * 1024; //slice size: 5MB per chunk
var succeed = 0;       //chunks accounted for (uploaded or skipped)
var currentIndex = 0;  //progress counter for the percentage display
var shardCount = 0;    //total number of chunks for the current file
var databgein; //upload start time
var dataend; //upload end time
// Page wiring: binds the "upload" and "query" buttons.
var page = {
init: function () {
$("#upload").click(function () {
//reset the display fields
$("#usetime").text('');
$("#param").text('');
$("#output").text('');
databgein = new Date();
var file = $("#file")[0].files[0]; //File object from the picker
if(file == null){
alert("文件不能为空");
return;
}
computeMD5(file);
});
$("#querybut").click(function () {
var json = {"page":1,"limit": 10,"name": ""};
//Ajax submit: fetch the first page of completed uploads
$.ajax({
url: "http://localhost:8080/fileservice/file/selectFileList",
type: "POST",
data: JSON.stringify(json),
dataType: "json",
async: true, //asynchronous
contentType: "application/json;charset=utf-8",
success: function (data) {
$("#filediv").text("");
if(data.data.records != null){
$(data.data.records).each(function(){
var tr = $("#filediv").append("<tr></tr>");
tr.append("<td>"+ this.identifier +"</td>");
tr.append("<td>"+ this.filename +"</td>");
tr.append("<td>"+ this.location +"</td>");
})
}
}, error: function (XMLHttpRequest, textStatus, errorThrown) {
alert("服务器出错!");
}
});
});
}
};
$(function () {
page.init();
});
/**
 * Computes the file's MD5 with spark-md5 by reading it slice by slice
 * (FileReader + incremental SparkMD5.ArrayBuffer), then calls isUpload()
 * so the server can decide between instant upload and resumed upload.
 * @param file File object selected by the user
 */
function computeMD5(file) {
let fileReader = new FileReader();
let time = new Date().getTime();
let blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
let currentChunk = 0;
let chunks = Math.ceil(file.size / chunkSize);
let spark = new SparkMD5.ArrayBuffer();
loadNext();
fileReader.onload = (e => {
spark.append(e.target.result);
if (currentChunk < chunks) {
currentChunk++;
loadNext();
// console.log('校验MD5 ' + ((currentChunk / chunks) * 100).toFixed(0) + '%');
$("#md5span").text( + ((currentChunk / chunks) * 100).toFixed(0) + '%');
} else {
let md5 = spark.end();
console.log(`MD5计算完毕:${file.name} \nMD5:${md5} \n分片:${chunks} 大小:${file.size} 用时:${new Date().getTime() - time} ms`);
isUpload(file, md5);
}
});
// Reads the next slice; the onload handler above fires when it is in memory.
function loadNext() {
let start = currentChunk * chunkSize;
let end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize;
fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
}
}
/**
 * Pre-upload check (GET /file/upload?identifier=<md5>).
 * If the server already has the whole file it replies skipUpload=true and
 * the upload finishes instantly ("秒传"); otherwise uploadedChunks lists
 * the chunk numbers that can be skipped when resuming ("断点续传").
 */
function isUpload(file, md5) {
//Ajax submit
$.ajax({
url: "http://localhost:8080/fileservice/file/upload?"+"identifier="+md5,
type: "GET",
async: true, //asynchronous
processData: false, //important: tell jQuery not to transform the data
contentType: false, //important: must be false to get the right Content-Type
success: function (data) {
// server-side chunk check: the basis of instant and resumed uploads
if (data.data.skipUpload) {
dataend = new Date();
$("#usetime").text(dataend.getTime() - databgein.getTime());
$("#rate").text("100");
$("#output").text(shardCount + " / " + shardCount);
}else{
repeatupload(file, md5, data.data.uploadedChunks);
}
}, error: function (XMLHttpRequest, textStatus, errorThrown) {
alert("服务器出错!");
}
});
}
/**
 * Uploads every chunk of the file, skipping chunk numbers the server
 * already has (uploadedChunks).
 * NOTE(review): `size` is assigned without var/let, making it an implicit
 * global — upload() reads it; confirm before making it local.
 */
function repeatupload(file, filemd5, uploadedChunks) {
size = file.size; //total size (implicit global, read by upload())
shardCount = Math.ceil(size / chunkSize); //total number of chunks
for (var i = 0; i < shardCount; i++) {
var chunkNumber = i+1;
if(uploadedChunks != null && uploadedChunks.indexOf(chunkNumber) >= 0){
console.log(chunkNumber+"分片已存在");
//chunk already on the server: count it as done without re-uploading
uploadChunks(file.name, filemd5, size);
continue;
}
upload(file, filemd5, uploadedChunks, chunkNumber);
}
}
/*
 * Uploads one chunk via POST /file/upload.
 * file           File object being uploaded
 * filemd5        MD5 of the whole file (the server-side identifier)
 * uploadedChunks chunk numbers already on the server (not used here)
 * chunkNumber    1-based index of the chunk to send
 * NOTE(review): reads the global `size` set by repeatupload().
 */
function upload(file, filemd5, uploadedChunks, chunkNumber) {
//compute the byte range [start, end) of this chunk
var start = (chunkNumber-1) * chunkSize,
end = Math.min(size, start + chunkSize);
//FormData (new in HTML5) carries the metadata fields plus the binary slice
var form = new FormData();
//cut the chunk out of the file by byte range
var data = file.slice(start, end);
form.append("chunkNumber", chunkNumber); //chunk number, 1-based
form.append("totalChunks", shardCount);
form.append("identifier", filemd5);
form.append("chunkSize", chunkSize);
form.append("currentChunkSize", data.size);
form.append("relativePath", file.name);
form.append("filename", file.name);
form.append("totalSize", size);
form.append("total", shardCount); //total number of chunks
form.append("upfile", data);
//Ajax submit
$.ajax({
url: "http://localhost:8080/fileservice/file/upload",
type: "POST",
data: form,
async: true, //asynchronous
processData: false, //important: tell jQuery not to transform the FormData
contentType: false, //important: must be false to get the right Content-Type
success: function (data) {
uploadChunks(file.name, filemd5, size);
}, error: function (XMLHttpRequest, textStatus, errorThrown) {
alert("服务器出错!");
}
});
}
/**
 * Bookkeeping after a chunk is accounted for (freshly uploaded or skipped
 * because the server already had it): updates the "done / total" display
 * and the progress percentage, and requests the merge once every chunk
 * has been counted. Uses the globals succeed/currentIndex/shardCount.
 */
function uploadChunks(filename, identifier, totalSize) {
//one more chunk confirmed
++succeed;//update the counter shown in the UI
if(succeed > shardCount){
succeed = shardCount;
}
$("#output").text(succeed + " / " + shardCount);
if (succeed == shardCount) {
merge(filename, identifier, totalSize);
dataend = new Date();
$("#usetime").text(dataend.getTime() - databgein.getTime());
}
//progress percentage
++currentIndex;
if(currentIndex > shardCount){
currentIndex = shardCount;
}
$("#rate").text(((currentIndex / shardCount) * 100).toFixed(0));
}
/**
 * Asks the server (POST /file/merge) to assemble all uploaded chunks into
 * the final file and register it in the file list.
 */
function merge(filename, identifier, totalSize) {
var json = {"filename":filename,"identifier": identifier,"totalSize": totalSize};
//Ajax submit
$.ajax({
url: "http://localhost:8080/fileservice/file/merge",
type: "POST",
data: JSON.stringify(json),
dataType: "json",
async: true, //asynchronous
contentType: "application/json;charset=utf-8",
success: function (data) {
if (data.code != 200) {
alert("服务器出错!");
}
}, error: function (XMLHttpRequest, textStatus, errorThrown) {
alert("服务器出错!");
}
});
}
</script>
</head>
<body>
<input type="file" id="file"/>
<button id="upload">上传</button>
<span style="font-size:12px">校验MD5: <span id="md5span"></span></span>
<span style="font-size:12px;margin-left:20px;">等待: <span id="output"></span></span>
<span style="font-size:12px;margin-left:20px;">进度: <span id="rate"></span>%</span>
<span style="font-size:12px;margin-left:20px;">用时: <span id="usetime"></span></span>
<br/>
<br/>
<br/>
<br/>
<button id="querybut">查询</button>
<h2>文件列表</h2>
<table>
<thead>
<tr>
<td>md5</td>
<td>文件名</td>
<td>地址</td>
</tr>
</thead>
<tbody id="filediv"></tbody>
</table>
</body>
</html>
总结:
因为本文章只是简单示例,仅供参考,实际开发中可能会碰到一些问题,我的原则就是:来一个再解决一个,绝不提前优化。
- 并发上传同一文件的情况,可以由前端生成唯一id,后端通过id和md5值组合的方式作为判断条件。
- upload接口分为:上传文件块(POST)和检查文件块(GET)。
更多推荐
所有评论(0)