Mirror of https://github.com/ragestudio/comty.git (synced 2025-06-09 10:34:17 +00:00)

binary chunk upload

This commit is contained in:
parent d6248827c6
commit cc57742bc8
@@ -24,7 +24,7 @@ export function checkChunkUploadHeaders(headers) {
 		"uploader-chunk-number",
 		"uploader-chunks-total",
 		"uploader-original-name",
-		"uploader-file-id"
+		"uploader-file-id",
 	]
 
 	for (const header of requiredHeaders) {
@@ -33,8 +33,9 @@ export function checkChunkUploadHeaders(headers) {
 		}
 
 		if (
-			(header === "uploader-chunk-number" || header === "uploader-chunks-total")
-			&& !/^[0-9]+$/.test(headers[header])
+			(header === "uploader-chunk-number" ||
+				header === "uploader-chunks-total") &&
+			!/^[0-9]+$/.test(headers[header])
 		) {
 			return false
 		}
@@ -43,13 +44,13 @@ export function checkChunkUploadHeaders(headers) {
 	return true
 }
 
 
 export function createAssembleChunksPromise({
 	chunksPath,
 	filePath,
 	maxFileSize,
 }) {
-	return () => new Promise(async (resolve, reject) => {
+	return () =>
+		new Promise(async (resolve, reject) => {
 		let fileSize = 0
 
 		if (!fs.existsSync(chunksPath)) {
@@ -74,14 +75,21 @@ export function createAssembleChunksPromise({
 			const chunkPath = path.join(chunksPath, chunk)
 
 			if (!fs.existsSync(chunkPath)) {
-				return reject(new OperationError(500, "No chunk data found"))
+				return reject(
+					new OperationError(500, "No chunk data found"),
+				)
 			}
 
 			const data = await fs.promises.readFile(chunkPath)
 			fileSize += data.length
 
 			if (fileSize > maxFileSize) {
-				return reject(new OperationError(413, "File exceeds max total file size, aborting assembly..."))
+				return reject(
+					new OperationError(
+						413,
+						"File exceeds max total file size, aborting assembly...",
+					),
+				)
 			}
 
 			await fs.promises.appendFile(filePath, data)
@@ -96,17 +104,15 @@ export function createAssembleChunksPromise({
 
 export async function handleChunkFile(
 	fileStream,
-	{
-		tmpDir,
-		headers,
-		maxFileSize,
-		maxChunkSize
-	}
+	{ tmpDir, headers, maxFileSize, maxChunkSize },
 ) {
 	return await new Promise(async (resolve, reject) => {
 		const workPath = path.join(tmpDir, headers["uploader-file-id"])
 		const chunksPath = path.join(workPath, "chunks")
-		const chunkPath = path.join(chunksPath, headers["uploader-chunk-number"])
+		const chunkPath = path.join(
+			chunksPath,
+			headers["uploader-chunk-number"],
+		)
 
 		const chunkCount = +headers["uploader-chunk-number"]
 		const totalChunks = +headers["uploader-chunks-total"]
@@ -122,7 +128,7 @@ export async function handleChunkFile(
 		// if is the first chunk check if dir exists before write things
 		if (chunkCount === 0) {
 			try {
-				if (!await fs.promises.stat(chunksPath).catch(() => false)) {
+				if (!(await fs.promises.stat(chunksPath).catch(() => false))) {
 					await fs.promises.mkdir(chunksPath, { recursive: true })
 				}
 			} catch (error) {
@@ -141,22 +147,34 @@ export async function handleChunkFile(
 		writeStream.on("close", () => {
 			if (maxChunkSize !== undefined) {
 				if (dataWritten > maxChunkSize) {
-					reject(new OperationError(413, "Chunk size exceeds max chunk size, aborting upload..."))
+					reject(
+						new OperationError(
+							413,
+							"Chunk size exceeds max chunk size, aborting upload...",
+						),
+					)
 					return
 				}
 
 				// estimate total file size,
 				// if estimation exceeds maxFileSize, abort upload
 				if (chunkCount === 0 && totalChunks > 0) {
-					if ((dataWritten * (totalChunks - 1)) > maxFileSize) {
-						reject(new OperationError(413, "File estimated size exceeds max total file size, aborting upload..."))
+					if (dataWritten * (totalChunks - 1) > maxFileSize) {
+						reject(
+							new OperationError(
+								413,
+								"File estimated size exceeds max total file size, aborting upload...",
+							),
+						)
 						return
 					}
 				}
 			}
 
 			if (isLast) {
-				const mimetype = mimetypes.lookup(headers["uploader-original-name"])
+				const mimetype = mimetypes.lookup(
+					headers["uploader-original-name"],
+				)
 				const extension = mimetypes.extension(mimetype)
 
 				let filename = headers["uploader-file-id"]
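To make the estimation guard in the hunk above concrete, here is a small worked example with made-up numbers (none of these values come from the commit): the guard multiplies the first chunk's size by totalChunks - 1, so a 5 MB first chunk announced as one of 10 chunks yields 5 MB * 9 = 45 MB, and a 40 MB maxFileSize rejects the upload with a 413 before any further chunks are accepted.

// Illustrative numbers only, not taken from the commit
const dataWritten = 5 * 1024 * 1024 // bytes written for the first chunk (5 MB)
const totalChunks = 10 // from the "uploader-chunks-total" header
const maxFileSize = 40 * 1024 * 1024 // 40 MB limit

const estimated = dataWritten * (totalChunks - 1) // 45 MB estimate

console.log(estimated > maxFileSize) // true -> the upload is rejected with 413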
@@ -165,12 +183,17 @@ export async function handleChunkFile(
 					filename = `${filename}_${Date.now()}`
 				}
 
-				return resolve(createAssembleChunksPromise({
+				return resolve(
+					createAssembleChunksPromise({
 					// build data
 					chunksPath: chunksPath,
-					filePath: path.resolve(workPath, `${filename}.${extension}`),
+					filePath: path.resolve(
+						workPath,
+						`${filename}.${extension}`,
+					),
 					maxFileSize: maxFileSize,
-				}))
+					}),
+				)
 			}
 
 			return resolve(null)
@@ -186,33 +209,36 @@ export async function handleChunkFile(
 
 export async function uploadChunkFile(
 	req,
-	{
-		tmpDir,
-		maxFileSize,
-		maxChunkSize,
-	}
+	{ tmpDir, maxFileSize, maxChunkSize },
 ) {
-	return await new Promise(async (resolve, reject) => {
-		// create a readable stream from req.body data blob
-		//
-		const chunkData = new Blob([req.body], { type: "application/octet-stream" })
-
-		console.log(chunkData)
-
-		if (!checkChunkUploadHeaders(req.headers)) {
-			reject(new OperationError(400, "Missing header(s)"))
-			return
-		}
-
-		await req.multipart(async (field) => {
-			try {
-				const result = await handleChunkFile(field.file.stream, {
-					tmpDir: tmpDir,
-					headers: req.headers,
-					maxFileSize: maxFileSize,
-					maxChunkSize: maxChunkSize,
-				})
+	// return await new Promise(async (resolve, reject) => {
+	// 	// create a readable node stream from "req.body" (octet-stream)
+	// 	await req.multipart(async (field) => {
+	// 		try {
+	// 			const result = await handleChunkFile(field.file.stream, {
+	// 				tmpDir: tmpDir,
+	// 				headers: req.headers,
+	// 				maxFileSize: maxFileSize,
+	// 				maxChunkSize: maxChunkSize,
+	// 			})
 
-				return resolve(result)
-			} catch (error) {
-				return reject(error)
-			}
-		})
-	})
+	// 			return resolve(result)
+	// 		} catch (error) {
+	// 			return reject(error)
+	// 		}
+	// 	})
+	// })
 }
 
 export default uploadChunkFile
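The rewritten uploadChunkFile above drops the multipart handling in favour of raw binary chunk bodies sent with the uploader-* headers. A minimal client-side sketch of a single chunk request under this scheme could look like the following; the "/upload/chunk" path is a placeholder, and only the header names and the octet-stream body are taken from the diff.

// Hypothetical client-side sketch: send one binary chunk.
// The "/upload/chunk" path is a placeholder, not from this commit;
// the uploader-* headers and octet-stream body mirror the server code above.
async function uploadChunk(file, fileId, chunkIndex, totalChunks, chunkSize) {
	const start = chunkIndex * chunkSize
	const chunk = file.slice(start, start + chunkSize)

	const response = await fetch("/upload/chunk", {
		method: "POST",
		headers: {
			"content-type": "application/octet-stream",
			"uploader-chunk-number": String(chunkIndex),
			"uploader-chunks-total": String(totalChunks),
			"uploader-original-name": file.name,
			"uploader-file-id": fileId,
		},
		body: chunk,
	})

	return await response.json()
}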
@@ -33,7 +33,6 @@ export default class SSEManager {
 
 		if (!channel) {
 			channel = this.createChannel(channelId)
-			//throw new OperationError(404, `Channel [${channelId}] not found`)
 		}
 
 		channel.clients.add(req)
@@ -43,16 +42,16 @@ export default class SSEManager {
 		res.setHeader("Connection", "keep-alive")
 		res.status(200)
 
-		// if (channel.cache.length > 0) {
-		// 	for (const oldData of channel.cache) {
-		// 		this.writeJSONToResponse(res, oldData)
-		// 	}
-		// }
-
 		this.writeJSONToResponse(res, {
 			event: "connected",
 		})
 
+		if (channel.cache.length > 0) {
+			for (const oldData of channel.cache) {
+				this.writeJSONToResponse(res, oldData)
+			}
+		}
+
 		if (initialData) {
 			this.writeJSONToResponse(res, initialData)
 		}
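The SSE change above replays a channel's cached events only after the initial "connected" event has been written. A hedged consumer sketch is shown below; only the "connected" event and the JSON payloads written by writeJSONToResponse are taken from the diff, while the EventSource usage and payload handling are assumptions about how a client might behave.

// Hypothetical consumer sketch; eventChannelURL is the value returned by the
// upload endpoint further down, everything else here is assumed.
function listenForUploadEvents(eventChannelURL) {
	const source = new EventSource(eventChannelURL)

	source.onmessage = (message) => {
		const payload = JSON.parse(message.data) // payloads are written as JSON

		if (payload.event === "connected") {
			// cached events, if any, are replayed right after this event
			return
		}

		console.log("upload event:", payload)
	}

	return source
}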
packages/server/services/files/classes/ChunkFileUpload/index.js (Executable file, 208 added lines)
@@ -0,0 +1,208 @@
// Original forked from: Buzut/huge-uploader-nodejs
// Copyright (c) 2018, Quentin Busuttil All rights reserved.

import fs from "node:fs"
import path from "node:path"
import mimetypes from "mime-types"

export function checkTotalSize(
	chunkSize, // in bytes
	totalChunks, // number of chunks
	maxFileSize, // in bytes
) {
	const totalSize = chunkSize * totalChunks

	if (totalSize > maxFileSize) {
		return false
	}

	return true
}

export function checkChunkUploadHeaders(headers) {
	const requiredHeaders = [
		"uploader-chunk-number",
		"uploader-chunks-total",
		"uploader-original-name",
		"uploader-file-id",
	]

	for (const header of requiredHeaders) {
		if (!headers[header] || typeof headers[header] !== "string") {
			return false
		}

		if (
			(header === "uploader-chunk-number" ||
				header === "uploader-chunks-total") &&
			!/^[0-9]+$/.test(headers[header])
		) {
			return false
		}
	}

	return true
}

export function createAssembleChunksPromise({
	chunksPath,
	filePath,
	maxFileSize,
}) {
	return () =>
		new Promise(async (resolve, reject) => {
			let fileSize = 0

			if (!fs.existsSync(chunksPath)) {
				return reject(new OperationError(500, "No chunks found"))
			}

			let chunks = await fs.promises.readdir(chunksPath)

			if (chunks.length === 0) {
				return reject(new OperationError(500, "No chunks found"))
			}

			// Sort the chunks numerically
			chunks = chunks.sort((a, b) => {
				const aNum = parseInt(a, 10)
				const bNum = parseInt(b, 10)

				return aNum - bNum
			})

			for (const chunk of chunks) {
				const chunkPath = path.join(chunksPath, chunk)

				if (!fs.existsSync(chunkPath)) {
					return reject(
						new OperationError(500, "No chunk data found"),
					)
				}

				const data = await fs.promises.readFile(chunkPath)
				fileSize += data.length

				if (fileSize > maxFileSize) {
					return reject(
						new OperationError(
							413,
							"File exceeds max total file size, aborting assembly...",
						),
					)
				}

				await fs.promises.appendFile(filePath, data)
			}

			return resolve({
				chunksLength: chunks.length,
				filePath: filePath,
			})
		})
}

export async function handleChunkFile(
	fileStream,
	{ tmpDir, headers, maxFileSize, maxChunkSize },
) {
	return await new Promise(async (resolve, reject) => {
		const workPath = path.join(tmpDir, headers["uploader-file-id"])
		const chunksPath = path.join(workPath, "chunks")
		const chunkPath = path.join(
			chunksPath,
			headers["uploader-chunk-number"],
		)

		const chunkCount = +headers["uploader-chunk-number"]
		const totalChunks = +headers["uploader-chunks-total"]

		// check if file has all chunks uploaded
		const isLast = chunkCount === totalChunks - 1

		// make sure chunk is in range
		if (chunkCount < 0 || chunkCount >= totalChunks) {
			return reject(new OperationError(500, "Chunk is out of range"))
		}

		// if is the first chunk check if dir exists before write things
		if (chunkCount === 0) {
			try {
				if (!(await fs.promises.stat(chunksPath).catch(() => false))) {
					await fs.promises.mkdir(chunksPath, { recursive: true })
				}
			} catch (error) {
				return reject(new OperationError(500, error.message))
			}
		}

		let dataWritten = 0

		let writeStream = fs.createWriteStream(chunkPath)

		writeStream.on("error", (err) => {
			reject(err)
		})

		writeStream.on("close", () => {
			if (maxChunkSize !== undefined) {
				if (dataWritten > maxChunkSize) {
					reject(
						new OperationError(
							413,
							"Chunk size exceeds max chunk size, aborting upload...",
						),
					)
					return
				}

				// estimate total file size,
				// if estimation exceeds maxFileSize, abort upload
				if (chunkCount === 0 && totalChunks > 0) {
					if (dataWritten * (totalChunks - 1) > maxFileSize) {
						reject(
							new OperationError(
								413,
								"File estimated size exceeds max total file size, aborting upload...",
							),
						)
						return
					}
				}
			}

			if (isLast) {
				const mimetype = mimetypes.lookup(
					headers["uploader-original-name"],
				)
				const extension = mimetypes.extension(mimetype)

				let filename = headers["uploader-file-id"]

				if (headers["uploader-use-date"] === "true") {
					filename = `${filename}_${Date.now()}`
				}

				return resolve(
					createAssembleChunksPromise({
						// build data
						chunksPath: chunksPath,
						filePath: path.resolve(
							workPath,
							`${filename}.${extension}`,
						),
						maxFileSize: maxFileSize,
					}),
				)
			}

			return resolve(null)
		})

		fileStream.on("data", (buffer) => {
			dataWritten += buffer.byteLength
		})

		fileStream.pipe(writeStream)
	})
}
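Taken together, handleChunkFile writes one chunk per call and resolves null for every intermediate chunk; only the request carrying the last chunk resolves to the assembler produced by createAssembleChunksPromise, which concatenates the ordered chunks and reports the final path. A minimal caller sketch under that contract, assuming a generic request handler supplies the chunk stream, headers and limits (those surroundings are not part of this file):

// Hypothetical caller sketch; chunkStream, headers, tmpDir and limits are
// assumed to come from the surrounding request handler.
import { handleChunkFile } from "@classes/ChunkFileUpload"

async function onChunkRequest(chunkStream, headers, tmpDir, limits) {
	const assemble = await handleChunkFile(chunkStream, {
		tmpDir: tmpDir,
		headers: headers,
		maxFileSize: limits.maxFileSize,
		maxChunkSize: limits.maxChunkSize,
	})

	// intermediate chunks resolve to null
	if (typeof assemble !== "function") {
		return { done: false }
	}

	// the last chunk resolves to an assembler that joins the chunks on disk
	const { filePath, chunksLength } = await assemble()

	return { done: true, filePath, chunksLength }
}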
@@ -1,15 +1,30 @@
-import path from "path"
-import fs from "fs"
+import { Duplex } from "node:stream"
+import path from "node:path"
+import fs from "node:fs"
 import RemoteUpload from "@services/remoteUpload"
 
-import ChunkFileUpload from "@shared-classes/ChunkFileUpload"
+import {
+	checkChunkUploadHeaders,
+	handleChunkFile,
+} from "@classes/ChunkFileUpload"
 
 const availableProviders = ["b2", "standard"]
 
+function bufferToStream(bf) {
+	let tmp = new Duplex()
+	tmp.push(bf)
+	tmp.push(null)
+	return tmp
+}
+
 export default {
 	useContext: ["cache", "limits"],
 	middlewares: ["withAuthentication"],
 	fn: async (req, res) => {
+		if (!checkChunkUploadHeaders(req.headers)) {
+			reject(new OperationError(400, "Missing header(s)"))
+			return
+		}
+
 		const uploadId = `${req.headers["uploader-file-id"]}_${Date.now()}`
 
 		const tmpPath = path.resolve(
@@ -47,23 +62,26 @@ export default {
 			throw new OperationError(400, "Invalid provider")
 		}
 
-		let build = await ChunkFileUpload(req, {
+		// create a readable stream from req.body(buffer)
+		const dataStream = bufferToStream(await req.buffer())
+
+		let result = await handleChunkFile(dataStream, {
 			tmpDir: tmpPath,
-			...limits,
-		}).catch((err) => {
-			throw new OperationError(err.code, err.message)
+			headers: req.headers,
+			maxFileSize: limits.maxFileSize,
+			maxChunkSize: limits.maxChunkSize,
 		})
 
-		if (typeof build === "function") {
+		if (typeof result === "function") {
 			try {
-				build = await build()
+				result = await result()
 
 				if (req.headers["transmux"] || limits.useCompression === true) {
 					// add a background task
 					const job = await global.queues.createJob(
 						"remote_upload",
 						{
-							filePath: build.filePath,
+							filePath: result.filePath,
 							parentDir: req.auth.session.user_id,
 							service: limits.useProvider,
 							useCompression: limits.useCompression,
@@ -81,11 +99,11 @@ export default {
 			return {
 				uploadId: uploadId,
 				sseChannelId: sseChannelId,
-				eventChannelURL: `${req.protocol}://${req.get("host")}/upload/sse_events/${sseChannelId}`,
+				eventChannelURL: `${req.headers["x-forwarded-proto"] || req.protocol}://${req.get("host")}/upload/sse_events/${sseChannelId}`,
 			}
 		} else {
 			const result = await RemoteUpload({
-				source: build.filePath,
+				source: result.filePath,
 				parentDir: req.auth.session.user_id,
 				service: limits.useProvider,
 				useCompression: limits.useCompression,
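The eventChannelURL change in this last hunk matters when the service runs behind a TLS-terminating reverse proxy: the Node process then sees req.protocol as plain http while the original scheme arrives in the x-forwarded-proto header. A small illustrative sketch of the fallback (all values below are made up, not from the commit):

// Illustrative only; these request values are assumptions, not from the commit.
const req = {
	protocol: "http", // what the upstream Node process observes
	headers: { "x-forwarded-proto": "https" }, // scheme the client actually used
	get: (name) => (name === "host" ? "files.example.com" : undefined),
}

const sseChannelId = "abc123"

const eventChannelURL = `${req.headers["x-forwarded-proto"] || req.protocol}://${req.get("host")}/upload/sse_events/${sseChannelId}`

console.log(eventChannelURL) // "https://files.example.com/upload/sse_events/abc123"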