use shared classes

This commit is contained in:
SrGooglo 2023-08-26 10:01:10 +00:00
parent d8a5608039
commit 6aea278de8
4 changed files with 3 additions and 217 deletions

View File

@@ -11,10 +11,9 @@ import EventEmitter from "@foxify/events"
import { User, Session, Config } from "@shared-classes/DbModels" import { User, Session, Config } from "@shared-classes/DbModels"
import DbManager from "@classes/DbManager" import DbManager from "@shared-classes/DbManager"
import { createStorageClientInstance } from "@classes/StorageClient"
import RedisClient from "@shared-classes/RedisClient" import RedisClient from "@shared-classes/RedisClient"
import StorageClient from "@shared-classes/StorageClient"
import internalEvents from "./events" import internalEvents from "./events"
@@ -52,7 +51,7 @@ export default class API {
eventBus = global.eventBus = new EventEmitter() eventBus = global.eventBus = new EventEmitter()
storage = global.storage = createStorageClientInstance() storage = global.storage = StorageClient()
jwtStrategy = global.jwtStrategy = { jwtStrategy = global.jwtStrategy = {
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(), jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),

View File

@@ -1,61 +0,0 @@
import fs from "fs"
export default class CacheService {
    // filepath -> live setInterval handle for its pending deletion check
    intervalMaps = new Map()

    // A cached file is deleted once it has gone unaccessed for this long (5 min).
    static deletionInterval = 1000 * 60 * 5

    /**
     * Delete `filepath` if its last access time (atime) is older than
     * `CacheService.deletionInterval`.
     *
     * @param {string} filepath - Path of the cached file to inspect.
     * @returns {boolean} true when a deletion was triggered (or the file could
     *   not be stat'ed, which usually means it is already gone), false when the
     *   file was accessed recently and is kept.
     */
    checkDeletionFilepath(filepath) {
        try {
            const stats = fs.statSync(filepath)
            const lastAccess = new Date(stats.atime).getTime()

            if (lastAccess + CacheService.deletionInterval < Date.now()) {
                // BUGFIX: fire-and-forget unlink must swallow rejections,
                // otherwise a concurrent delete becomes an unhandled
                // promise rejection and can crash the process.
                fs.promises.unlink(filepath).catch(() => {})
                return true
            }

            return false
        } catch (error) {
            // stat failed — most likely the file no longer exists. Log it,
            // attempt a best-effort cleanup, and report the file as deleted.
            console.error(error)
            fs.promises.unlink(filepath).catch(() => {})
            return true
        }
    }

    /**
     * Schedule `filepath` for idle-based deletion: every `deletionInterval`
     * the file's atime is re-checked; once it has been idle long enough the
     * file is removed and the timer is cleared.
     *
     * BUGFIX vs. original: the callback read an undefined `results` variable
     * (ReferenceError on every tick, which immediately killed the interval),
     * and `setInterval` was called with no delay (firing roughly every
     * millisecond instead of every 5 minutes).
     *
     * @param {string} filepath - Path of the cached file to watch.
     */
    appendToDeletion(filepath) {
        try {
            const interval = setInterval(() => {
                try {
                    const deleted = this.checkDeletionFilepath(filepath)

                    if (deleted) {
                        clearInterval(interval)
                        this.intervalMaps.delete(filepath)
                    }
                } catch (error) {
                    // Never let a broken check leak the timer.
                    clearInterval(interval)
                    this.intervalMaps.delete(filepath)
                }
            }, CacheService.deletionInterval)

            this.intervalMaps.set(filepath, interval)
        } catch (error) {
            console.error(error)
            return fs.promises.unlink(filepath)
        }
    }
}

View File

@@ -1,58 +0,0 @@
import mongoose from "mongoose"
/**
 * Build the argument pair for `mongoose.connect(...)` from environment-style
 * connection variables.
 *
 * @param {object} obj - Source of DB_* values (typically `process.env`).
 * @returns {[string, object]} `[connectionUri, mongooseOptions]`.
 */
function getConnectionConfig(obj) {
    const { DB_USER, DB_DRIVER, DB_NAME, DB_PWD, DB_HOSTNAME, DB_PORT } = obj

    const driver = DB_DRIVER ?? "mongodb"
    // Credentials segment only when both user and password are present.
    const credentials = DB_USER && DB_PWD ? `${DB_USER}:${DB_PWD}@` : ""
    const host = DB_HOSTNAME ?? "localhost"
    const port = DB_PORT ?? "27017"
    // A user implies server-side auth negotiation.
    const authQuery = DB_USER ? "/?authMechanism=DEFAULT" : ""

    const uri = `${driver}://${credentials}${host}:${port}${authQuery}`

    return [
        uri,
        {
            dbName: DB_NAME,
            useNewUrlParser: true,
            useUnifiedTopology: true,
        },
    ]
}
export default class DBManager {
    /**
     * Open the mongoose connection described by `config` (falls back to
     * `process.env`). Logs progress and resolves once the attempt settles;
     * a failed connection is logged rather than thrown.
     *
     * @param {object} [config] - DB_* connection variables; defaults to process.env.
     */
    initialize = async (config) => {
        console.log("🔌 Connecting to DB...")

        const dbConfig = getConnectionConfig(config ?? process.env)

        mongoose.set("strictQuery", false)

        const connection = await mongoose.connect(...dbConfig)
            .catch((err) => {
                console.log(`❌ Failed to connect to DB, retrying...\n`)
                // BUGFIX: original logged `error`, which is undefined here
                // (the catch parameter is `err`) — the resulting
                // ReferenceError masked the real connection failure.
                console.log(err)
                return false
            })

        if (connection) {
            console.log(`✅ Connected to DB.`)
        }
    }
}

View File

@@ -1,94 +0,0 @@
const Minio = require("minio")
/**
 * Build the default public-read S3 bucket policy for a bucket.
 *
 * @param {object} payload
 * @param {string} payload.bucketName - Target bucket; required.
 * @returns {object} An AWS policy document allowing anonymous `s3:GetObject`.
 * @throws {Error} When `bucketName` is missing.
 */
export const generateDefaultBucketPolicy = (payload) => {
    const { bucketName } = payload

    if (!bucketName) {
        throw new Error("bucketName is required")
    }

    // Single statement: anyone may read any object in the bucket.
    const readOnlyStatement = {
        Sid: "",
        Effect: "Allow",
        Principal: {
            AWS: ["*"],
        },
        Action: ["s3:GetObject"],
        Resource: [`arn:aws:s3:::${bucketName}/*`],
    }

    return {
        Version: "2012-10-17",
        Statement: [readOnlyStatement],
    }
}
/**
 * Minio client specialized for this service: remembers a default bucket and
 * region, and can bootstrap that bucket with a public-read policy.
 */
export class StorageClient extends Minio.Client {
    /**
     * @param {object} options - Minio.Client options, plus `defaultBucket`
     *   and `defaultRegion` (both coerced to strings).
     */
    constructor(options) {
        super(options)

        this.defaultBucket = String(options.defaultBucket)
        this.defaultRegion = String(options.defaultRegion)
    }

    /**
     * Public URL of an object stored under `key` in the default bucket.
     * NOTE(review): relies on `protocol`/`host`/`port` fields inherited
     * from Minio.Client — confirm against the installed minio version.
     */
    composeRemoteURL = (key) => {
        const origin = `${this.protocol}//${this.host}:${this.port}`
        return [origin, this.defaultBucket, key].join("/")
    }

    /** Apply the default public-read policy to `bucketName`. */
    setDefaultBucketPolicy = async (bucketName) => {
        const policyJson = JSON.stringify(generateDefaultBucketPolicy({ bucketName }))
        return this.setBucketPolicy(bucketName, policyJson)
    }

    /**
     * Ensure the default bucket exists and carries the default policy;
     * creates/repairs both as needed.
     */
    initialize = async () => {
        console.log("🔌 Checking if storage client have default bucket...")

        // Treat any lookup failure as "bucket missing" so we try to create it.
        const bucketExists = await this.bucketExists(this.defaultBucket).catch(() => false)

        if (!bucketExists) {
            console.warn("🪣 Default bucket not exists! Creating new bucket...")

            await this.makeBucket(this.defaultBucket, "s3")
            await this.setDefaultBucketPolicy(this.defaultBucket)
        }

        // A bucket can exist without the policy (e.g. created out-of-band).
        const bucketPolicy = await this.getBucketPolicy(this.defaultBucket).catch(() => null)

        if (!bucketPolicy) {
            await this.setDefaultBucketPolicy(this.defaultBucket)
        }

        console.log("✅ Storage client is ready.")
    }
}
/**
 * Build a StorageClient from the S3_* environment variables, merged over any
 * caller-supplied options (env-derived values win).
 *
 * @param {object} [options] - Extra Minio.Client options.
 * @returns {StorageClient}
 */
export const createStorageClientInstance = (options) => {
    // BUGFIX: the original called a global `toBoolean` that is neither
    // defined nor imported anywhere in this file — a guaranteed
    // ReferenceError at call time. Parse the flag inline instead; the
    // common truthy spellings enable SSL, anything else disables it.
    const useSSL = ["true", "1", "yes"].includes(
        String(process.env.S3_USE_SSL).toLowerCase(),
    )

    return new StorageClient({
        ...options,
        endPoint: process.env.S3_ENDPOINT,
        port: Number(process.env.S3_PORT),
        useSSL,
        accessKey: process.env.S3_ACCESS_KEY,
        secretKey: process.env.S3_SECRET_KEY,
        defaultBucket: process.env.S3_BUCKET,
        defaultRegion: process.env.S3_REGION,
    })
}

export default createStorageClientInstance