Moving from Local to Cloud
The abstraction
Your app should not know or care whether files are on local disk or in S3. A storage interface hides the implementation:
// src/storage-interface.ts
export interface StorageProvider {
  save(key: string, data: Buffer, mimeType: string): Promise<void>;
  read(key: string): Promise<{ stream: ReadableStream; size: number; mimeType: string }>;
  delete(key: string): Promise<void>;
  getSignedUrl(key: string, expiresIn: number): Promise<string>;
}
Four operations: save, read, delete, and get a signed URL. Every storage backend implements these.
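To get a feel for how small an implementation can be, here is a minimal in-memory provider. This is a sketch for unit tests only; the memoryProvider name and map-backed store are illustrative, not part of the chapter's codebase:
// src/providers/memory.ts (hypothetical test helper, not for production)
import { Readable } from "node:stream";
import type { StorageProvider } from "../storage-interface.js";

const store = new Map<string, { data: Buffer; mimeType: string }>();

export const memoryProvider: StorageProvider = {
  async save(key, data, mimeType) {
    store.set(key, { data, mimeType });
  },
  async read(key) {
    const entry = store.get(key);
    if (!entry) throw new Error(`Not found: ${key}`);
    return {
      // Wrap the buffer in an array so it streams as one chunk
      stream: Readable.toWeb(Readable.from([entry.data])) as ReadableStream,
      size: entry.data.length,
      mimeType: entry.mimeType,
    };
  },
  async delete(key) {
    store.delete(key);
  },
  async getSignedUrl(key, expiresIn) {
    // Signing is meaningless in memory; return a plain path for tests
    return `/files/${key}?expires=${Math.floor(Date.now() / 1000) + expiresIn}`;
  },
};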
Local storage provider
// src/providers/local.ts
import { mkdirSync, writeFileSync, createReadStream, statSync, unlinkSync } from "node:fs";
import { join } from "node:path";
import { Readable } from "node:stream";
import { createHmac } from "node:crypto";
import type { StorageProvider } from "../storage-interface.js";

const UPLOAD_DIR = join(process.cwd(), "uploads");
mkdirSync(UPLOAD_DIR, { recursive: true }); // Ensure the directory exists at startup

const SIGNING_SECRET = process.env.SIGNING_SECRET ?? "dev-secret"; // Use a real secret in production

export const localProvider: StorageProvider = {
  async save(key, data, mimeType) {
    writeFileSync(join(UPLOAD_DIR, key), data);
  },
  async read(key) {
    const filePath = join(UPLOAD_DIR, key);
    const stat = statSync(filePath);
    const nodeStream = createReadStream(filePath);
    return {
      stream: Readable.toWeb(nodeStream) as ReadableStream,
      size: stat.size,
      mimeType: "application/octet-stream", // Look up from DB in practice
    };
  },
  async delete(key) {
    try {
      unlinkSync(join(UPLOAD_DIR, key));
    } catch {
      // Deleting a file that is already gone is not an error
    }
  },
  async getSignedUrl(key, expiresIn) {
    const expiresAt = Math.floor(Date.now() / 1000) + expiresIn;
    const payload = `${key}:${expiresAt}`;
    const sig = createHmac("sha256", SIGNING_SECRET).update(payload).digest("hex");
    return `/files/signed/${key}?expires=${expiresAt}&sig=${sig}`;
  },
};
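The local provider mints signed URLs, but something has to verify them. A matching route could look like the sketch below, assuming the framework exposes query parameters as c.query (an assumption; adapt to whatever your router provides). The secret must be the same one used to sign:
// Verifies the HMAC-signed URLs minted by localProvider (a sketch)
import { createHmac, timingSafeEqual } from "node:crypto";
import { localProvider } from "./providers/local.js";

const SIGNING_SECRET = process.env.SIGNING_SECRET ?? "dev-secret";

route.get("/files/signed/:key", {
  resolve: async (c) => {
    const expires = Number(c.query.expires);
    const sig = String(c.query.sig ?? "");
    // Reject expired links first
    if (!expires || expires < Math.floor(Date.now() / 1000)) {
      return Response.json({ error: "Link expired" }, { status: 403 });
    }
    // Recompute the signature and compare in constant time
    const expected = createHmac("sha256", SIGNING_SECRET)
      .update(`${c.params.key}:${expires}`)
      .digest("hex");
    const a = Buffer.from(sig);
    const b = Buffer.from(expected);
    if (a.length !== b.length || !timingSafeEqual(a, b)) {
      return Response.json({ error: "Invalid signature" }, { status: 403 });
    }
    const { stream, size, mimeType } = await localProvider.read(c.params.key);
    return new Response(stream, {
      headers: { "content-type": mimeType, "content-length": String(size) },
    });
  },
});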
S3 storage provider
// src/providers/s3.ts
import {
  S3Client,
  PutObjectCommand,
  GetObjectCommand,
  DeleteObjectCommand,
} from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
import type { StorageProvider } from "../storage-interface.js";

const s3 = new S3Client({
  /* config */
});
const BUCKET = process.env.S3_BUCKET!;

export const s3Provider: StorageProvider = {
  async save(key, data, mimeType) {
    await s3.send(
      new PutObjectCommand({
        Bucket: BUCKET,
        Key: key,
        Body: data,
        ContentType: mimeType,
      }),
    );
  },
  async read(key) {
    const response = await s3.send(new GetObjectCommand({ Bucket: BUCKET, Key: key }));
    if (!response.Body) throw new Error(`Empty response body for ${key}`);
    return {
      // The SDK returns a Node stream in Node.js; convert it to a web ReadableStream
      stream: response.Body.transformToWebStream() as ReadableStream,
      size: response.ContentLength ?? 0,
      mimeType: response.ContentType ?? "application/octet-stream",
    };
  },
  async delete(key) {
    await s3.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: key }));
  },
  async getSignedUrl(key, expiresIn) {
    return getSignedUrl(s3, new GetObjectCommand({ Bucket: BUCKET, Key: key }), { expiresIn });
  },
};
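One caveat: PutObjectCommand holds the whole object in memory, so very large files are better sent as multipart uploads. AWS's lib-storage helper handles the part-splitting; here is a sketch of an alternative save, assuming @aws-sdk/lib-storage is installed (the saveLarge name is illustrative):
// Alternative save for large files using multipart upload (a sketch)
import { Upload } from "@aws-sdk/lib-storage";

async function saveLarge(key: string, body: NodeJS.ReadableStream, mimeType: string) {
  const upload = new Upload({
    client: s3,
    params: { Bucket: BUCKET, Key: key, Body: body, ContentType: mimeType },
  });
  await upload.done(); // Splits the body into parts and uploads them concurrently
}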
Choosing the provider
// src/storage.ts
import type { StorageProvider } from "./storage-interface.js";
import { localProvider } from "./providers/local.js";
import { s3Provider } from "./providers/s3.js";

export const storage: StorageProvider =
  process.env.STORAGE_PROVIDER === "s3" ? s3Provider : localProvider;
One environment variable switches the storage backend. No route changes are needed.
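The static version above pulls in the AWS SDK even when you run locally. If that matters, a dynamic import loads only the provider you actually use. A sketch, assuming an ESM setup with top-level await:
// src/storage.ts — lazy variant (a sketch): load only the provider in use
import type { StorageProvider } from "./storage-interface.js";

export const storage: StorageProvider =
  process.env.STORAGE_PROVIDER === "s3"
    ? (await import("./providers/s3.js")).s3Provider
    : (await import("./providers/local.js")).localProvider;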
Using the abstraction in routes
import { storage } from "../storage.js";

route.post("/files", {
  resolve: async (c) => {
    // ... parse and validate ...
    await storage.save(storedName, buffer, validType);
    // ... record in database ...
  },
});
route.get("/files/:id/download", {
resolve: async (c) => {
const file = db.prepare("SELECT * FROM files WHERE id = ?").get(c.params.id) as any;
if (!file) return Response.json({ error: "Not found" }, { status: 404 });
const { stream, size } = await storage.read(file.stored_name);
return new Response(stream, {
headers: {
"content-type": file.mime_type,
"content-length": String(size),
},
});
},
});
route.delete("/files/:id", {
  resolve: async (c) => {
    const file = db.prepare("SELECT * FROM files WHERE id = ?").get(c.params.id) as any;
    if (!file) return Response.json({ error: "Not found" }, { status: 404 });
    await storage.delete(file.stored_name);
    db.prepare("DELETE FROM files WHERE id = ?").run(c.params.id);
    return new Response(null, { status: 204 });
  },
});
The route code is identical regardless of whether files are on local disk or in S3. Switching storage is a configuration change, not a code change.
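The interface's fourth operation slots in the same way. A route that hands the client a time-limited URL instead of streaming the bytes itself might look like this (a sketch; the /files/:id/url path is illustrative). With the S3 provider the client then downloads directly from the bucket; with the local provider it goes through the signed route shown earlier:
// Return a time-limited URL instead of streaming the file (a sketch)
route.get("/files/:id/url", {
  resolve: async (c) => {
    const file = db.prepare("SELECT * FROM files WHERE id = ?").get(c.params.id) as any;
    if (!file) return Response.json({ error: "Not found" }, { status: 404 });
    const url = await storage.getSignedUrl(file.stored_name, 15 * 60); // valid for 15 minutes
    return Response.json({ url });
  },
});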
Exercises
Exercise 1: Implement the StorageProvider interface with the local provider. Refactor your routes to use storage.save, storage.read, and storage.delete.
Exercise 2: Add logging to the storage layer by wrapping the provider in a logging decorator. Log every save, read, and delete with the key and timing. One possible shape is sketched after this list.
Exercise 3: If you have S3 or R2 access, implement the S3 provider. Switch between local and S3 with STORAGE_PROVIDER=s3.
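A starting point for Exercise 2. This is one possible shape, not the only one; the withLogging name and console-based output are placeholders for whatever logger you use:
// A logging decorator over any StorageProvider (a sketch)
import type { StorageProvider } from "./storage-interface.js";

export function withLogging(inner: StorageProvider): StorageProvider {
  const timed = async <T>(op: string, key: string, fn: () => Promise<T>): Promise<T> => {
    const start = performance.now();
    try {
      return await fn();
    } finally {
      console.log(`storage.${op} key=${key} ms=${(performance.now() - start).toFixed(1)}`);
    }
  };
  return {
    save: (key, data, mimeType) => timed("save", key, () => inner.save(key, data, mimeType)),
    read: (key) => timed("read", key, () => inner.read(key)),
    delete: (key) => timed("delete", key, () => inner.delete(key)),
    getSignedUrl: (key, expiresIn) => timed("getSignedUrl", key, () => inner.getSignedUrl(key, expiresIn)),
  };
}
Wrap the provider once, where it is chosen, and every route gets logging for free.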
What is the main benefit of abstracting the storage layer behind an interface?