mirror of https://github.com/ragestudio/comty.git
synced 2025-06-09 10:34:17 +00:00

Update StorageClient with CDN support and B2 integration

This commit is contained in:
parent ad860f51d3
commit 0c7b6d7720
@@ -1,5 +1,5 @@
-const Minio = require("minio")
-import path from "path"
+import path from "node:path"
+import { Client } from "minio"
 
 export const generateDefaultBucketPolicy = (payload) => {
 	const { bucketName } = payload
@@ -12,30 +12,26 @@ export const generateDefaultBucketPolicy = (payload) => {
 		Version: "2012-10-17",
 		Statement: [
 			{
-				Action: [
-					"s3:GetObject"
-				],
+				Action: ["s3:GetObject"],
 				Effect: "Allow",
 				Principal: {
-					AWS: [
-						"*"
-					]
+					AWS: ["*"],
 				},
-				Resource: [
-					`arn:aws:s3:::${bucketName}/*`
-				],
-				Sid: ""
-			}
+				Resource: [`arn:aws:s3:::${bucketName}/*`],
+				Sid: "",
+			},
 		]
 	}
 }
 
-export class StorageClient extends Minio.Client {
+export class StorageClient extends Client {
 	constructor(options) {
 		super(options)
 
 		this.defaultBucket = String(options.defaultBucket)
 		this.defaultRegion = String(options.defaultRegion)
+		this.setupBucket = Boolean(options.setupBucket)
+		this.cdnUrl = options.cdnUrl
 	}
 
 	composeRemoteURL = (key, extraKey) => {
@@ -45,6 +41,10 @@ export class StorageClient extends Minio.Client {
 			_path = path.join(_path, extraKey)
 		}
 
+		if (this.cdnUrl) {
+			return `${this.cdnUrl}/${_path}`
+		}
+
 		return `${this.protocol}//${this.host}:${this.port}/${_path}`
 	}
 
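With cdnUrl set, composeRemoteURL now short-circuits to the CDN host instead of building a URL from the client's own protocol/host/port. A minimal behavior sketch; the endpoint and key values below are invented, only the option names come from this diff:

// All endpoint/bucket values here are illustrative placeholders.
const client = new StorageClient({
	endPoint: "s3.example.com",
	port: 443,
	useSSL: true,
	accessKey: "AKIA...",
	secretKey: "...",
	defaultBucket: "uploads",
	cdnUrl: "https://cdn.example.com",
})

// With cdnUrl set, the key resolves through the CDN:
client.composeRemoteURL("user_123/avatar.png")
// -> "https://cdn.example.com/user_123/avatar.png"
// (assuming _path is seeded from the key; the hunk starts mid-function)

// Without cdnUrl, it falls through to `${protocol}//${host}:${port}/${_path}`.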
@@ -57,11 +57,14 @@ export class StorageClient extends Minio.Client {
 	initialize = async () => {
 		console.log("🔌 Checking if storage client have default bucket...")
 
+		if (this.setupBucket !== false) {
 			try {
 				const bucketExists = await this.bucketExists(this.defaultBucket)
 
 				if (!bucketExists) {
-					console.warn("🪣 Default bucket not exists! Creating new bucket...")
+					console.warn(
+						"🪣 Default bucket not exists! Creating new bucket...",
+					)
 
 					await this.makeBucket(this.defaultBucket, "s3")
 
@@ -69,12 +72,17 @@ export class StorageClient extends Minio.Client {
 					await this.setDefaultBucketPolicy(this.defaultBucket)
 				}
 			} catch (error) {
-				console.error(`Failed to check if default bucket exists or create default bucket >`, error)
+				console.error(
+					`Failed to check if default bucket exists or create default bucket >`,
+					error,
+				)
 			}
 
 			try {
 				// check if default bucket policy exists
-				const bucketPolicy = await this.getBucketPolicy(this.defaultBucket).catch(() => {
+				const bucketPolicy = await this.getBucketPolicy(
+					this.defaultBucket,
+				).catch(() => {
 					return null
 				})
 
@@ -83,7 +91,11 @@ export class StorageClient extends Minio.Client {
 				await this.setDefaultBucketPolicy(this.defaultBucket)
 				}
 			} catch (error) {
-				console.error(`Failed to get or set default bucket policy >`, error)
+				console.error(
+					`Failed to get or set default bucket policy >`,
+					error,
+				)
 			}
+		}
 
 		console.log("✅ Storage client is ready.")
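Together with the constructor change above, initialize() now gates all bucket setup (existence check, creation, and the public-read policy from generateDefaultBucketPolicy) behind setupBucket. A hedged construction sketch with placeholder credentials; only the option names are taken from the diff:

// Placeholder values throughout.
const storage = new StorageClient({
	endPoint: "minio.internal",
	accessKey: "key",
	secretKey: "secret",
	defaultBucket: "comty",
	defaultRegion: "us-east-1",
	setupBucket: false, // skips bucketExists/makeBucket and both policy checks
})

await storage.initialize() // with setup skipped, only logs the readiness message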
@@ -23,6 +23,7 @@ export type S3UploadPayload = {
 	filePath: string
 	basePath: string
 	targetPath?: string
+	s3Provider?: string
 	onProgress?: Function
 }
 
@@ -38,8 +39,9 @@ export default class Upload {
 		const result = await Upload.toS3({
 			filePath: payload.filePath,
-			basePath: `${payload.user_id}/${global.nanoid()}`,
+			targetPath: payload.targetPath,
+			basePath: payload.user_id,
 			onProgress: payload.onProgress,
+			s3Provider: payload.s3Provider,
 		})
 
 		// delete workpath
@@ -76,21 +78,22 @@ export default class Upload {
 	}
 
 	static toS3 = async (payload: S3UploadPayload) => {
-		const { filePath, basePath, targetPath, onProgress } = payload
+		const { filePath, basePath, targetPath, s3Provider, onProgress } =
+			payload
 
 		// if targetPath is provided, means its a directory
-		const isDirectory = targetPath !== undefined
-
-		let uploadPath = path.resolve(basePath, path.basename(filePath))
-
-		if (isDirectory) {
-			uploadPath = basePath
-		}
+		const isDirectory = !!targetPath
+
+		const metadata = await this.buildFileMetadata(
+			isDirectory ? targetPath : filePath,
+		)
+
+		let uploadPath = path.join(basePath, metadata["File-Hash"])
+
+		if (isDirectory) {
+			uploadPath = path.join(basePath, nanoid())
+		}
 
 		if (typeof onProgress === "function") {
 			onProgress({
 				percent: 0,
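The key scheme changes here: instead of resolving basePath plus the file's basename, a single file is now content-addressed by its hash from buildFileMetadata, while a directory upload gets a random nanoid segment. Roughly, with an invented basePath and hash:

// Single file: content-addressed key
//   path.join("user_123", metadata["File-Hash"])
//   -> "user_123/3b5d5c3712955042212316173ccf37be"
// Directory (targetPath set): random key instead of a hash
//   path.join("user_123", nanoid())
//   -> "user_123/V1StGXR8_Z5jdHi6B-myT"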
@@ -98,19 +101,21 @@ export default class Upload {
 			})
 		}
 
-		console.log("Uploading to S3:", {
-			filePath,
-			uploadPath,
-			basePath,
-			targetPath,
-			metadata,
-		})
+		// console.log("Uploading to S3:", {
+		// 	filePath: filePath,
+		// 	basePath: basePath,
+		// 	uploadPath: uploadPath,
+		// 	targetPath: targetPath,
+		// 	metadata: metadata,
+		// 	s3Provider: s3Provider,
+		// })
 
 		const result = await putObject({
 			filePath: filePath,
 			uploadPath: uploadPath,
 			metadata: metadata,
 			targetFilename: isDirectory ? path.basename(targetPath) : null,
+			provider: s3Provider,
 		})
 
 		return result
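End to end, a caller can now choose the storage backend per upload. A usage sketch; the field names match S3UploadPayload above, but all values are invented:

const result = await Upload.toS3({
	filePath: "/tmp/uploads/avatar.png",
	basePath: "user_123",
	s3Provider: "b2", // resolved via global.storages; defaults to "standard"
	onProgress: (state) => console.log(state.percent),
})

// result: { id: <uploadPath>, url: <provider's composeRemoteURL(...)>, metadata: {...} }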
@@ -8,7 +8,14 @@ export default async function putObject({
 	metadata = {},
 	targetFilename,
 	onFinish,
+	provider = "standard",
 }) {
+	const providerClass = global.storages[provider]
+
+	if (!providerClass) {
+		throw new Error(`Provider [${provider}] not found`)
+	}
+
 	const isDirectory = await fs.promises
 		.lstat(filePath)
 		.then((stats) => stats.isDirectory())
@@ -31,13 +38,13 @@ export default async function putObject({
 
 		return {
 			id: uploadPath,
-			url: global.storage.composeRemoteURL(uploadPath, targetFilename),
+			url: providerClass.composeRemoteURL(uploadPath, targetFilename),
 			metadata: metadata,
 		}
 	}
 
 	// upload to storage
-	await global.storage.fPutObject(
+	await providerClass.fPutObject(
 		process.env.S3_BUCKET,
 		uploadPath,
 		filePath,
@@ -46,7 +53,7 @@ export default async function putObject({
 
 	const result = {
 		id: uploadPath,
-		url: global.storage.composeRemoteURL(uploadPath),
+		url: providerClass.composeRemoteURL(uploadPath),
 		metadata: metadata,
 	}
 
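putObject is now provider-agnostic: it looks the client up in global.storages by name, fails fast on unknown names, and uses that same client for both fPutObject and the returned URL. An invented call for illustration:

await putObject({
	filePath: "/tmp/work/video.mp4",
	uploadPath: "user_123/3b5d5c37",
	metadata: { "Content-Type": "video/mp4" },
	provider: "b2", // must be a key of global.storages
})

// Unregistered names throw, e.g.: Error: Provider [r2] not found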
@@ -25,13 +25,13 @@ class API extends Server {
 	contexts = {
 		db: new DbManager(),
 		cache: new CacheService(),
-		storage: StorageClient(),
-		b2Storage: null,
 		SSEManager: new SSEManager(),
 		redis: RedisClient({
 			maxRetriesPerRequest: null,
 		}),
 		limits: {},
+		storage: StorageClient(),
+		b2Storage: null,
 	}
 
 	queuesManager = new TaskQueueManager(
@@ -45,14 +45,18 @@ class API extends Server {
 		global.sse = this.contexts.SSEManager
 
 		if (process.env.B2_KEY_ID && process.env.B2_APP_KEY) {
-			this.contexts.b2Storage = new B2({
-				applicationKeyId: process.env.B2_KEY_ID,
-				applicationKey: process.env.B2_APP_KEY,
+			this.contexts.b2Storage = StorageClient({
+				endPoint: process.env.B2_ENDPOINT,
+				cdnUrl: process.env.B2_CDN_ENDPOINT,
+				defaultBucket: process.env.B2_BUCKET,
+				accessKey: process.env.B2_KEY_ID,
+				secretKey: process.env.B2_APP_KEY,
+				port: 443,
+				useSSL: true,
+				setupBucket: false,
 			})
 
 			global.b2Storage = this.contexts.b2Storage
 
-			await this.contexts.b2Storage.authorize()
+			await this.contexts.b2Storage.initialize()
 		} else {
 			console.warn(
 				"B2 storage not configured on environment, skipping...",
|
||||
await this.contexts.db.initialize()
|
||||
await this.contexts.storage.initialize()
|
||||
|
||||
global.storage = this.contexts.storage
|
||||
global.storages = {
|
||||
standard: this.contexts.storage,
|
||||
b2: this.contexts.b2Storage,
|
||||
}
|
||||
global.queues = this.queuesManager
|
||||
|
||||
this.contexts.limits = await LimitsClass.get()
|
||||
|
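Note the shape change: the global.storage singleton is replaced by a global.storages map keyed by provider name (which putObject above now consumes). Any remaining readers of global.storage would need a named lookup, e.g.:

// "standard" is the default provider key introduced by this diff
const client = global.storages[providerName ?? "standard"]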
@@ -35,7 +35,7 @@ export default {
 			1024 *
 			1024,
 		useCompression: true,
-		useProvider: "standard",
+		useProvider: req.headers["use-provider"] ?? "standard",
 	}
 
 	// const user = await req.auth.user()
@@ -85,6 +85,7 @@ export default {
 		filePath: assemble.filePath,
 		workPath: workPath,
 		transformations: transformations,
+		s3Provider: config.useProvider,
 	}
 
 	// if has transformations, use background job
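With useProvider read from the request, a client can steer a single upload to B2 by sending a use-provider header. A hedged request sketch; the endpoint path and auth scheme are assumptions, only the header name comes from this diff:

await fetch("https://api.example.com/upload", {
	method: "POST",
	headers: {
		authorization: `Bearer ${token}`,
		"use-provider": "b2", // omitted -> "standard"
	},
	body: formData,
})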