Add initial Docker setup with API services, database, and Nginx configuration

This commit is contained in:
Jose Eduardo 2025-08-03 20:57:38 -04:00
commit 86798bfd42
6 changed files with 216 additions and 0 deletions

5
.dockerignore Normal file
View File

@ -0,0 +1,5 @@
# Keep the build context small and keep secrets out of COPY . .
node_modules
dist
*.log
*.md
.git
.env

23
Dockerfile Normal file
View File

@ -0,0 +1,23 @@
# syntax=docker/dockerfile:1
# Production image for the NestJS API (fronted by nginx in docker-compose).
FROM node:22

# All subsequent paths are relative to /app.
WORKDIR /app

# Copy only the manifests first so the dependency layer stays cached until
# they change. This project installs with yarn, so yarn.lock (not
# package-lock.json) must be in the layer for reproducible installs.
COPY package.json yarn.lock ./

# --frozen-lockfile: fail instead of silently rewriting yarn.lock,
# guaranteeing the image gets exactly the locked dependency versions.
RUN yarn install --frozen-lockfile

# Copy the rest of the application source.
COPY . .

# Compile the NestJS application into dist/.
RUN yarn build

# Documentation only: the app listens on 9999 (see nginx.conf upstream).
EXPOSE 9999

# Exec form so node runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["node", "dist/main"]

118
docker-compose.yml Normal file
View File

@ -0,0 +1,118 @@
# Two identical API replicas load-balanced by nginx (see nginx.conf upstream).
services:
  api1:
    container_name: api1
    hostname: api1
    build:
      context: .
      dockerfile: Dockerfile
    networks:
      - backend
      - payment-processor
    volumes:
      # Dev bind-mount: shadows the image's /app (including the built dist/).
      - .:/app
    depends_on:
      redis:
        condition: service_started
      database:
        # Wait until pg_isready succeeds before starting the API.
        condition: service_healthy
    deploy:
      resources:
        limits:
          cpus: "0.15"
          memory: "80MB"
    healthcheck:
      # The API listens on 9999 inside its own container (Dockerfile EXPOSE).
      test: [ "CMD", "curl", "-f", "http://localhost:9999" ]
      interval: 30s
      timeout: 10s
      retries: 3
  api2:
    container_name: api2
    hostname: api2
    build:
      context: .
      dockerfile: Dockerfile
    depends_on:
      redis:
        condition: service_started
      database:
        condition: service_healthy
    networks:
      - backend
      - payment-processor
    volumes:
      - .:/app
    deploy:
      resources:
        limits:
          cpus: "0.15"
          memory: "80MB"
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:9999" ]
      interval: 30s
      timeout: 10s
      retries: 3
  nginx:
    # NOTE(review): prefer pinning a version (e.g. nginx:1.27-alpine) over :latest.
    image: nginx:latest
    container_name: nginx
    hostname: nginx
    ports:
      # Host 9999 -> container 80; nginx.conf must listen on 80 in-container.
      - "9999:80"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
    networks:
      - backend
    depends_on:
      - api1
      - api2
    deploy:
      resources:
        limits:
          cpus: "0.10"
          memory: "20MB"
  redis:
    container_name: redis
    image: redis:7.2-alpine
    hostname: redis
    platform: linux/amd64
    ports:
      - "6379:6379"
    networks:
      - backend
    deploy:
      resources:
        limits:
          cpus: "0.05"
          memory: "20MB"
  database:
    container_name: database
    # NOTE(review): prefer pinning a major version (e.g. postgres:16-alpine).
    image: postgres:alpine
    hostname: database
    platform: linux/amd64
    ports:
      - "5432:5432"
    healthcheck:
      # Must match POSTGRES_USER/POSTGRES_DB below; the previous
      # "pg_isready -U rinha -d rinha" probed credentials that don't exist,
      # so the container could never report healthy.
      test: [ "CMD-SHELL", "pg_isready -U postgres -d payment" ]
      interval: 5s
      timeout: 5s
      retries: 5
    environment:
      - POSTGRES_DB=payment
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres_pwd
    volumes:
      - database_volume:/var/lib/postgresql/data
      # Runs docker/db_init/*.sql on first startup of an empty data dir.
      - ./docker/db_init/:/docker-entrypoint-initdb.d/
    networks:
      - backend
    deploy:
      resources:
        limits:
          cpus: "0.5"
          memory: "110MB"
networks:
  backend:
    driver: bridge
  # Pre-existing network shared with the external payment-processor stack.
  payment-processor:
    external: true
volumes:
  database_volume:

10
docker/db_init/init.sql Normal file
View File

@ -0,0 +1,10 @@
-- Payments ledger.
-- UNLOGGED: skips WAL writes for speed, at the cost of losing all rows on a
-- postgres crash -- NOTE(review): presumably acceptable for this workload; confirm.
CREATE UNLOGGED TABLE payments (
id UUID PRIMARY KEY,
correlation_id UUID NOT NULL,
-- NOTE(review): unconstrained DECIMAL; consider DECIMAL(p,s) for money values.
amount DECIMAL NOT NULL,
payment_processor VARCHAR(50) NOT NULL,
created_at TIMESTAMP default now() NOT NULL
);
-- Supports time-window range queries (e.g. payment summaries).
CREATE INDEX payments_created_at ON payments (created_at );
-- Supports lookups by correlation_id.
CREATE INDEX payments_correlation_id ON payments (correlation_id);

9
local.Dockerfile Normal file
View File

@ -0,0 +1,9 @@
# syntax=docker/dockerfile:1
# Local development image: installs deps, builds once, then runs the
# NestJS watcher (start:dev) so code changes are recompiled on the fly.
FROM node:22-alpine

WORKDIR /app

# Manifests first: the install layer is cached until package.json/yarn.lock
# change. (Previously the install ran after COPY . ., so every source edit
# re-installed all dependencies; "-y" is also not a valid yarn flag.)
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile

# Copy the application source and produce an initial build.
COPY . .
RUN yarn build

# Exec form: yarn is PID 1 and receives stop signals directly.
ENTRYPOINT [ "yarn", "start:dev" ]

51
nginx.conf Normal file
View File

@ -0,0 +1,51 @@
# Reverse proxy / load balancer in front of api1 and api2.
# Tuned for throughput: logging is disabled on purpose.
worker_processes auto;
worker_rlimit_nofile 65535;

events {
    worker_connections 4096;
    use epoll;
    multi_accept on;
    accept_mutex off;
}

http {
    # Observability traded for speed: no access log, only critical errors.
    access_log off;
    error_log /dev/null crit;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;

    keepalive_timeout 65;
    keepalive_requests 1000000;

    # Requests are tiny JSON payloads; reject anything larger quickly.
    client_max_body_size 1k;
    client_body_timeout 1s;
    client_header_timeout 1s;

    upstream backend_pool {
        least_conn;
        server api1:9999 max_fails=1 fail_timeout=1s weight=1;
        server api2:9999 max_fails=1 fail_timeout=1s weight=1;
        # Reuse upstream connections instead of reconnecting per request.
        keepalive 3072;
        keepalive_requests 100000;
        keepalive_timeout 120s;
    }

    server {
        # docker-compose publishes "9999:80", so nginx must listen on port 80
        # inside the container. The previous `listen 9999` left container
        # port 80 unbound, making the published host port unreachable.
        listen 80 default_server;

        location / {
            proxy_pass http://backend_pool;
            # HTTP/1.1 + empty Connection header are required for upstream keepalive.
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            proxy_buffering off;
            proxy_cache off;
            proxy_store off;
            proxy_connect_timeout 100ms;
            proxy_send_timeout 2s;
            proxy_read_timeout 2s;
            # Retry the other replica on connection/server failures.
            proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
        }
    }
}