Compare commits

...

2 Commits

Author SHA1 Message Date
Julian Vollmer d94152730e rem 2026-01-26 14:10:16 +01:00
Julian Vollmer 1b047ec874 add pipeline 2026-01-26 14:07:52 +01:00
16 changed files with 1000 additions and 6 deletions

.gitea/workflows/deploy.yml (new file, 394 additions)

@@ -0,0 +1,394 @@
name: Build, Push and Deploy Florale Emotion Website

on:
  push:
    branches: [ 'feature/*', 'main', 'master' ]
  pull_request:
    branches: [ main, master ]

env:
  HARBOR_REGISTRY: registry.julianvollmer.de
  PROJECT_NAME: florale-emotion

jobs:
  feature-branch:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    if: startsWith(github.ref, 'refs/heads/feature/')
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          driver-opts: |
            image=moby/buildkit:v0.12.0
          buildkitd-flags: --debug
      - name: Extract metadata
        id: meta
        run: |
          BRANCH_CLEAN=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9._-]/-/g')
          SHORT_SHA="${{ github.sha }}"
          SHORT_SHA="${SHORT_SHA:0:8}"
          TAG="${BRANCH_CLEAN}-${SHORT_SHA}"
          echo "tag=${TAG}" >> $GITHUB_OUTPUT
          echo "frontend_image=${{ env.HARBOR_REGISTRY }}/${{ env.PROJECT_NAME }}/florale-emotion-frontend:${TAG}" >> $GITHUB_OUTPUT
          echo "backend_image=${{ env.HARBOR_REGISTRY }}/${{ env.PROJECT_NAME }}/florale-emotion-backend:${TAG}" >> $GITHUB_OUTPUT
          echo "bot_image=${{ env.HARBOR_REGISTRY }}/${{ env.PROJECT_NAME }}/florale-emotion-bot:${TAG}" >> $GITHUB_OUTPUT
      - name: Login to Harbor Registry
        run: |
          echo "Harbor12345" | docker login ${{ env.HARBOR_REGISTRY }} -u admin --password-stdin
      - name: Build and Push Frontend (Feature)
        working-directory: ./website
        run: |
          IMAGE_NAME="${{ steps.meta.outputs.frontend_image }}"
          TAG="${{ steps.meta.outputs.tag }}"
          for attempt in 1 2 3; do
            echo "Build attempt $attempt for feature frontend..."
            docker build --no-cache --progress=plain -t "${IMAGE_NAME}" . || {
              echo "Build failed on attempt $attempt"
              if [ $attempt -eq 3 ]; then
                echo "All build attempts failed"
                exit 1
              fi
              sleep 10
              continue
            }
            for push_attempt in 1 2 3; do
              echo "Push attempt $push_attempt for feature frontend..."
              docker push "${IMAGE_NAME}" && {
                echo "Feature frontend push successful on attempt $push_attempt"
                break
              } || {
                echo "Push failed on attempt $push_attempt"
                if [ $push_attempt -eq 3 ]; then
                  echo "All push attempts failed"
                  exit 1
                fi
                sleep 30
              }
            done
            break
          done
          echo "✅ Feature frontend build and push completed successfully!"
          echo "🏷️ Tag: ${TAG}"
          echo "📦 Image: ${IMAGE_NAME}"
      - name: Build and Push Backend (Feature)
        working-directory: ./website/backend
        env:
          IMAGE_NAME: ${{ steps.meta.outputs.backend_image }}
          TAG: ${{ steps.meta.outputs.tag }}
        run: |
          echo "Building backend image: ${IMAGE_NAME}"
          docker build --no-cache --progress=plain -t "${IMAGE_NAME}" .
          echo "Pushing backend image: ${IMAGE_NAME}"
          docker push "${IMAGE_NAME}"
          echo "✅ Feature backend build and push completed successfully!"
          echo "🏷️ Tag: ${TAG}"
          echo "📦 Image: ${IMAGE_NAME}"
      - name: Build and Push Social Media Bot (Feature)
        working-directory: ./website/social-media-bot
        env:
          IMAGE_NAME: ${{ steps.meta.outputs.bot_image }}
          TAG: ${{ steps.meta.outputs.tag }}
        run: |
          echo "Building social media bot image: ${IMAGE_NAME}"
          docker build --no-cache --progress=plain -t "${IMAGE_NAME}" .
          echo "Pushing social media bot image: ${IMAGE_NAME}"
          docker push "${IMAGE_NAME}"
          echo "✅ Feature social media bot build and push completed successfully!"
          echo "🏷️ Tag: ${TAG}"
          echo "📦 Image: ${IMAGE_NAME}"
      - name: Setup kubectl
        uses: azure/setup-kubectl@v3
        with:
          version: 'latest'
      - name: Configure kubectl
        env:
          KUBECTLSECRET: ${{ secrets.KUBECTLSECRET }}
        run: |
          mkdir -p ~/.kube
          echo "🔍 Debugging KUBECTLSECRET..."
          echo "Secret length: ${#KUBECTLSECRET}"
          # Try to decode as base64 first; if that fails, use it as plain text
          if echo "${KUBECTLSECRET}" | base64 -d > ~/.kube/config 2>/dev/null; then
            echo "✅ KUBECTLSECRET decoded as base64"
          else
            echo "⚠️ KUBECTLSECRET is not base64, using as plain text"
            echo "${KUBECTLSECRET}" > ~/.kube/config
          fi
          echo "📁 kubeconfig created at ~/.kube/config"
          chmod 600 ~/.kube/config
      - name: Test kubectl connection
        run: |
          kubectl version --client
          kubectl get nodes
      - name: Deploy Feature Branch
        env:
          FRONTEND_IMAGE: ${{ steps.meta.outputs.frontend_image }}
          BACKEND_IMAGE: ${{ steps.meta.outputs.backend_image }}
          BOT_IMAGE: ${{ steps.meta.outputs.bot_image }}
          BRANCH_NAME: ${{ github.ref_name }}
        run: |
          echo "🚀 Deploying feature branch: $BRANCH_NAME"
          echo "Frontend image: $FRONTEND_IMAGE"
          echo "Backend image: $BACKEND_IMAGE"
          echo "Bot image: $BOT_IMAGE"
          # Ensure namespace exists
          kubectl create namespace florale-emotion --dry-run=client -o yaml | kubectl apply -f -
          # Ensure Harbor registry secret exists
          kubectl create secret docker-registry harbor-registry-secret \
            --docker-server=${{ env.HARBOR_REGISTRY }} \
            --docker-username=admin \
            --docker-password=Harbor12345 \
            --namespace=florale-emotion \
            --dry-run=client -o yaml | kubectl apply -f -
          # Apply the frontend deployment and substitute the image tag dynamically (no latest on feature branches)
          sed "s|__IMAGE_TAG__|${{ steps.meta.outputs.tag }}|g" website/k8s/frontend.yaml | kubectl apply -n florale-emotion -f -
          # Apply the backend deployment with tag substitution
          sed "s|__IMAGE_TAG__|${{ steps.meta.outputs.tag }}|g" website/k8s/backend.yaml | kubectl apply -n florale-emotion -f -
          # Apply the social media bot deployment with tag substitution
          sed "s|__IMAGE_TAG__|${{ steps.meta.outputs.tag }}|g" website/k8s/bot.yaml | kubectl apply -n florale-emotion -f -
          # Apply feature branch ingress (using dev subdomain)
          kubectl apply -f website/k8s/ingress-dev.yaml -n florale-emotion
          # Wait for rollout
          echo "⏳ Waiting for rollout to finish..."
          kubectl rollout status deployment/florale-emotion-frontend -n florale-emotion --timeout=300s
          echo "✅ Feature branch deployment complete!"
          echo "🌐 Dev URL: https://dev.florale-emotion.de"
          echo "📦 Harbor Registry: https://registry.julianvollmer.de"
  production-branch:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          driver-opts: |
            image=moby/buildkit:v0.12.0
          buildkitd-flags: --debug
      - name: Extract metadata
        id: meta
        run: |
          BRANCH_CLEAN=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9._-]/-/g')
          SHORT_SHA="${{ github.sha }}"
          SHORT_SHA="${SHORT_SHA:0:8}"
          TAG="${BRANCH_CLEAN}-${SHORT_SHA}"
          # For main/master branches, a latest tag is created in addition
          echo "Master branch detected, will create latest tags"
          echo "tag=${TAG}" >> $GITHUB_OUTPUT
          echo "frontend_image=${{ env.HARBOR_REGISTRY }}/${{ env.PROJECT_NAME }}/florale-emotion-frontend:${TAG}" >> $GITHUB_OUTPUT
          echo "backend_image=${{ env.HARBOR_REGISTRY }}/${{ env.PROJECT_NAME }}/florale-emotion-backend:${TAG}" >> $GITHUB_OUTPUT
          echo "bot_image=${{ env.HARBOR_REGISTRY }}/${{ env.PROJECT_NAME }}/florale-emotion-bot:${TAG}" >> $GITHUB_OUTPUT
      - name: Login to Harbor Registry
        run: |
          echo "Harbor12345" | docker login ${{ env.HARBOR_REGISTRY }} -u admin --password-stdin
      - name: Build and Push Frontend (Production + Latest)
        working-directory: ./website
        run: |
          IMAGE_NAME="${{ steps.meta.outputs.frontend_image }}"
          TAG="${{ steps.meta.outputs.tag }}"
          LATEST_IMAGE="${{ env.HARBOR_REGISTRY }}/${{ env.PROJECT_NAME }}/florale-emotion-frontend:latest"
          for attempt in 1 2 3; do
            echo "Build attempt $attempt for production frontend..."
            docker build --no-cache --progress=plain -t "${IMAGE_NAME}" . || {
              echo "Build failed on attempt $attempt"
              if [ $attempt -eq 3 ]; then
                echo "All build attempts failed"
                exit 1
              fi
              sleep 10
              continue
            }
            # Push versioned image
            for push_attempt in 1 2 3; do
              echo "Push attempt $push_attempt for production frontend..."
              docker push "${IMAGE_NAME}" && {
                echo "Production frontend push successful on attempt $push_attempt"
                break
              } || {
                echo "Push failed on attempt $push_attempt"
                if [ $push_attempt -eq 3 ]; then
                  echo "All push attempts failed"
                  exit 1
                fi
                sleep 30
              }
            done
            # Create and push latest tag
            echo "Creating latest tag for production..."
            docker tag "${IMAGE_NAME}" "${LATEST_IMAGE}"
            for push_attempt in 1 2 3; do
              echo "Push attempt $push_attempt for latest tag..."
              docker push "${LATEST_IMAGE}" && {
                echo "Latest tag push successful on attempt $push_attempt"
                break
              } || {
                echo "Latest push failed on attempt $push_attempt"
                if [ $push_attempt -eq 3 ]; then
                  echo "All latest push attempts failed"
                  exit 1
                fi
                sleep 30
              }
            done
            break
          done
          echo "✅ Production frontend build and push completed successfully!"
          echo "🏷️ Tag: ${TAG}"
          echo "📦 Image: ${IMAGE_NAME}"
          echo "📦 Latest: ${LATEST_IMAGE}"
      - name: Build and Push Backend (Production + Latest)
        working-directory: ./website/backend
        env:
          IMAGE_NAME: ${{ steps.meta.outputs.backend_image }}
          TAG: ${{ steps.meta.outputs.tag }}
        run: |
          echo "Building backend image: ${IMAGE_NAME}"
          docker build --no-cache --progress=plain -t "${IMAGE_NAME}" .
          echo "Pushing backend image: ${IMAGE_NAME}"
          docker push "${IMAGE_NAME}"
          LATEST_IMAGE="${IMAGE_NAME%:*}:latest"
          echo "Tagging backend as latest: ${LATEST_IMAGE}"
          docker tag "${IMAGE_NAME}" "${LATEST_IMAGE}"
          docker push "${LATEST_IMAGE}"
          echo "✅ Production backend build and push completed successfully!"
          echo "🏷️ Tag: ${TAG}"
          echo "📦 Image: ${IMAGE_NAME}"
          echo "📦 Latest: ${LATEST_IMAGE}"
      - name: Build and Push Social Media Bot (Production + Latest)
        working-directory: ./website/social-media-bot
        env:
          IMAGE_NAME: ${{ steps.meta.outputs.bot_image }}
          TAG: ${{ steps.meta.outputs.tag }}
        run: |
          echo "Building social media bot image: ${IMAGE_NAME}"
          docker build --no-cache --progress=plain -t "${IMAGE_NAME}" .
          echo "Pushing social media bot image: ${IMAGE_NAME}"
          docker push "${IMAGE_NAME}"
          LATEST_IMAGE="${IMAGE_NAME%:*}:latest"
          echo "Tagging social media bot as latest: ${LATEST_IMAGE}"
          docker tag "${IMAGE_NAME}" "${LATEST_IMAGE}"
          docker push "${LATEST_IMAGE}"
          echo "✅ Production social media bot build and push completed successfully!"
          echo "🏷️ Tag: ${TAG}"
          echo "📦 Image: ${IMAGE_NAME}"
          echo "📦 Latest: ${LATEST_IMAGE}"
      - name: Setup kubectl
        uses: azure/setup-kubectl@v3
        with:
          version: 'latest'
      - name: Configure kubectl
        env:
          KUBECTLSECRET: ${{ secrets.KUBECTLSECRET }}
        run: |
          mkdir -p ~/.kube
          echo "🔍 Debugging KUBECTLSECRET..."
          echo "Secret length: ${#KUBECTLSECRET}"
          # Try to decode as base64 first; if that fails, use it as plain text
          if echo "${KUBECTLSECRET}" | base64 -d > ~/.kube/config 2>/dev/null; then
            echo "✅ KUBECTLSECRET decoded as base64"
          else
            echo "⚠️ KUBECTLSECRET is not base64, using as plain text"
            echo "${KUBECTLSECRET}" > ~/.kube/config
          fi
          echo "📁 kubeconfig created at ~/.kube/config"
          chmod 600 ~/.kube/config
      - name: Test kubectl connection
        run: |
          kubectl version --client
          kubectl get nodes
      - name: Deploy to Production
        env:
          FRONTEND_IMAGE: ${{ steps.meta.outputs.frontend_image }}
          BACKEND_IMAGE: ${{ steps.meta.outputs.backend_image }}
          BOT_IMAGE: ${{ steps.meta.outputs.bot_image }}
        run: |
          echo "🚀 Deploying to production..."
          echo "Frontend image: $FRONTEND_IMAGE"
          echo "Backend image: $BACKEND_IMAGE"
          echo "Bot image: $BOT_IMAGE"
          # Ensure namespace exists
          kubectl create namespace florale-emotion --dry-run=client -o yaml | kubectl apply -f -
          # Ensure Harbor registry secret exists
          kubectl create secret docker-registry harbor-registry-secret \
            --docker-server=${{ env.HARBOR_REGISTRY }} \
            --docker-username=admin \
            --docker-password=Harbor12345 \
            --namespace=florale-emotion \
            --dry-run=client -o yaml | kubectl apply -f -
          # Apply the frontend deployment and substitute the image tag with latest
          sed "s|__IMAGE_TAG__|latest|g" website/k8s/frontend.yaml | kubectl apply -n florale-emotion -f -
          # Apply the backend deployment with the latest tag
          sed "s|__IMAGE_TAG__|latest|g" website/k8s/backend.yaml | kubectl apply -n florale-emotion -f -
          # Apply the social media bot deployment with the latest tag
          sed "s|__IMAGE_TAG__|latest|g" website/k8s/bot.yaml | kubectl apply -n florale-emotion -f -
          # Apply production ingress
          kubectl apply -f website/k8s/ingress.yaml -n florale-emotion
          # Wait for rollout
          echo "⏳ Waiting for rollout to finish..."
          kubectl rollout status deployment/florale-emotion-frontend -n florale-emotion --timeout=300s
          echo "✅ Production deployment complete!"
          echo "🌐 Website: https://florale-emotion.de"
          echo "🌐 WWW: https://www.florale-emotion.de"
          echo "🔧 Backend API: https://api.florale-emotion.de"
# Notes:
# - Adapted for the florale-emotion project
# - Namespace: florale-emotion
# - Dev deployment at dev.florale-emotion.de
# - Production at florale-emotion.de
# - Supports frontend, backend and social media bot
# - Robust retry mechanisms for build and push
# - Unique tags for better traceability
# - Timeout protection for all deployments

DEPLOYMENT.md (new file, 149 additions)

@@ -0,0 +1,149 @@
# Florale Emotion - Kubernetes Deployment

This document describes the deployment setup for the Florale Emotion project.

## Overview

The project consists of three main components:

- **Frontend**: Angular-based website
- **Backend**: Node.js API server
- **Social Media Bot**: automated social media posting

## Deployment environments

### Development

- **URL**: https://dev.florale-emotion.de
- **API**: https://api-dev.florale-emotion.de
- **Trigger**: feature branches (`feature/*`)
- **Namespace**: `florale-emotion`

### Production

- **URL**: https://florale-emotion.de, https://www.florale-emotion.de
- **API**: https://api.florale-emotion.de
- **Trigger**: main/master branch
- **Namespace**: `florale-emotion`
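Once DNS and certificates are in place, a quick smoke test of either environment is to probe the public endpoints. This is only a sketch: it assumes the ingresses described below are already serving traffic and that the backend exposes the `/health` route used by its probes.

```bash
# Production frontend and API
curl -fsS -o /dev/null -w "frontend: %{http_code}\n" https://florale-emotion.de/
curl -fsS https://api.florale-emotion.de/health

# Development (feature branch deployments)
curl -fsS -o /dev/null -w "frontend: %{http_code}\n" https://dev.florale-emotion.de/
curl -fsS https://api-dev.florale-emotion.de/health
```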
## Pipeline structure

### Gitea workflow

The pipeline is defined in `.gitea/workflows/deploy.yml` and covers:

1. **Feature branch deployment**
   - Builds and pushes Docker images with branch-specific tags (see the tag sketch below)
   - Deploys to dev.florale-emotion.de
   - Uses dynamic image tags (no `latest`)
2. **Production deployment**
   - Builds and pushes both versioned and `latest` tags
   - Deploys to florale-emotion.de
   - Full SSL configuration
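The branch-specific tag is built from the sanitized branch name plus the 8-character short commit SHA, mirroring the `Extract metadata` step of the workflow. Reproduced locally it looks roughly like this (a sketch; the exact value depends on your checked-out branch and commit):

```bash
# Reproduce the image tag the pipeline would use for the current checkout
BRANCH_CLEAN=$(git rev-parse --abbrev-ref HEAD | sed 's/[^a-zA-Z0-9._-]/-/g')
SHORT_SHA=$(git rev-parse --short=8 HEAD)
TAG="${BRANCH_CLEAN}-${SHORT_SHA}"
echo "Image tag: ${TAG}"
```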
### Docker images

All images are stored in the Harbor registry:

- `registry.julianvollmer.de/florale-emotion/florale-emotion-frontend`
- `registry.julianvollmer.de/florale-emotion/florale-emotion-backend`
- `registry.julianvollmer.de/florale-emotion/florale-emotion-bot`
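Pulling one of these images manually requires Harbor credentials with read access to the `florale-emotion` project; the pipeline logs in with the registry admin account, but for local use your own account is assumed here:

```bash
# Log in to the private Harbor registry (interactive; use your own account)
docker login registry.julianvollmer.de

# Pull, for example, the current production frontend image
docker pull registry.julianvollmer.de/florale-emotion/florale-emotion-frontend:latest
```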
## Kubernetes manifests

### Frontend (`website/k8s/frontend.yaml`)

- 2 replicas for high availability
- Nginx-based Angular deployment
- Health checks on port 80
- Resource limits: 256Mi RAM, 200m CPU

### Backend (`website/k8s/backend.yaml`)

- 2 replicas for load balancing
- Node.js API on port 3000
- Health checks on the `/health` endpoint
- Resource limits: 512Mi RAM, 300m CPU
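The `/health` endpoint used by the probes (and by the Docker `HEALTHCHECK` in the backend image) can also be checked by hand through a port-forward; a small sketch, assuming working kubectl access to the cluster:

```bash
# Forward the backend service to localhost and hit the health endpoint
kubectl port-forward service/florale-emotion-backend-service 3000:3000 -n florale-emotion &
sleep 2
curl -fsS http://localhost:3000/health
kill %1  # stop the port-forward again
```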
### Social media bot (`website/k8s/bot.yaml`)

- 1 replica (singleton service)
- Scheduled/cron-based execution
- Resource limits: 256Mi RAM, 100m CPU

### Ingress configuration

- **Production** (`ingress.yaml`): florale-emotion.de, www.florale-emotion.de, api.florale-emotion.de
- **Development** (`ingress-dev.yaml`): dev.florale-emotion.de, api-dev.florale-emotion.de
- Automatic SSL certificates via Let's Encrypt
- HTTPS redirect enabled
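Whether the ingress rules are active and the certificates were actually issued can be verified with kubectl. The last command assumes cert-manager is installed in the cluster, which the `cert-manager.io/cluster-issuer` annotation in the manifests implies:

```bash
# Ingress rules and assigned addresses
kubectl get ingress -n florale-emotion

# TLS secrets referenced by the ingress manifests
kubectl get secret florale-emotion-tls florale-emotion-dev-tls -n florale-emotion

# Certificate status (requires the cert-manager CRDs)
kubectl get certificate -n florale-emotion
```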
## Deployment process

### Automatic deployment

1. Code is pushed to a feature branch or main
2. The Gitea pipeline starts automatically
3. Docker images are built and pushed
4. Kubernetes deployments are updated
5. Health checks confirm a successful rollout (see the commands below)
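To follow a rollout or see which image tag is currently live, the standard kubectl commands are sufficient; this uses the deployment names from the manifests in `website/k8s/`:

```bash
# Watch the frontend rollout (the same command the pipeline runs)
kubectl rollout status deployment/florale-emotion-frontend -n florale-emotion --timeout=300s

# Show the image each deployment is currently running
kubectl get deployments -n florale-emotion \
  -o custom-columns=NAME:.metadata.name,IMAGE:.spec.template.spec.containers[0].image
```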
### Manual steps (if needed)

```bash
# Create the namespace
kubectl create namespace florale-emotion

# Create the Harbor registry secret
kubectl create secret docker-registry harbor-registry-secret \
  --docker-server=registry.julianvollmer.de \
  --docker-username=admin \
  --docker-password=Harbor12345 \
  --namespace=florale-emotion

# Apply the deployments
kubectl apply -f website/k8s/ -n florale-emotion
```
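Note that the deployment manifests contain an `__IMAGE_TAG__` placeholder, so applying them verbatim will not reference a pullable image. When deploying by hand, substitute the tag the same way the pipeline does (here with `latest`, as in the production job):

```bash
# Replace the __IMAGE_TAG__ placeholder before applying, mirroring the pipeline
TAG=latest   # or a branch-specific tag printed by the pipeline
for manifest in frontend backend bot; do
  sed "s|__IMAGE_TAG__|${TAG}|g" "website/k8s/${manifest}.yaml" \
    | kubectl apply -n florale-emotion -f -
done
```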
## Monitoring & troubleshooting

### Viewing logs

```bash
# Frontend logs
kubectl logs -f deployment/florale-emotion-frontend -n florale-emotion

# Backend logs
kubectl logs -f deployment/florale-emotion-backend -n florale-emotion

# Bot logs
kubectl logs -f deployment/florale-emotion-bot -n florale-emotion
```

### Checking status

```bash
# Deployment status
kubectl get deployments -n florale-emotion

# Pod status
kubectl get pods -n florale-emotion

# Service status
kubectl get services -n florale-emotion

# Ingress status
kubectl get ingress -n florale-emotion
```

## Security

- All images are pulled from a private Harbor registry
- Non-root containers for backend and bot
- Security headers in the nginx configuration
- HTTPS enforced for all domains
- Resource limits for all containers

## Scaling

The application can be scaled horizontally:

```bash
# Scale the frontend
kubectl scale deployment florale-emotion-frontend --replicas=3 -n florale-emotion

# Scale the backend
kubectl scale deployment florale-emotion-backend --replicas=3 -n florale-emotion
```

## Backup & recovery

- The Kubernetes manifests are versioned in Git
- Docker images are stored in the Harbor registry
- Database backups (if any) must be configured separately

@@ -279,15 +279,11 @@ pm2 save

This project is intended for internal use by Florale Emotion.

## 🙏 Acknowledgements

Developed with ❤️ for Florale Emotion - flowers for special moments.

---

**Next steps:**
1. [ ] Register the domain
2. [ ] Set up hosting
3. [ ] Create e-mail accounts
4. [ ] Create social media accounts
5. [ ] Install the Gitea server
6. [ ] Deploy the website
7. [ ] Start marketing

website/.dockerignore (new file, 19 additions)

@@ -0,0 +1,19 @@
node_modules
npm-debug.log
.git
.gitignore
README.md
.env
.nyc_output
coverage
.nyc_output
.vscode
.DS_Store
*.md
.angular
dist
e2e
src/**/*.spec.ts
karma.conf.js
protractor.conf.js
.editorconfig

website/Dockerfile (new file, 32 additions)

@@ -0,0 +1,32 @@
# Multi-stage build for Angular Frontend
FROM node:18-alpine AS builder
# Set working directory
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install all dependencies (the Angular build requires devDependencies such as the Angular CLI)
RUN npm ci
# Copy source code
COPY . .
# Build the Angular application in production mode
# (flags after "--" are forwarded to the build script; a bare "npm run build --prod" drops the flag)
RUN npm run build -- --configuration production
# Production stage
FROM nginx:alpine
# Copy built application from builder stage
COPY --from=builder /app/dist/florale-emotion /usr/share/nginx/html
# Copy custom nginx configuration
COPY nginx.conf /etc/nginx/conf.d/default.conf
# Expose port 80
EXPOSE 80
# Start nginx
CMD ["nginx", "-g", "daemon off;"]

@@ -0,0 +1,12 @@
node_modules
npm-debug.log
.git
.gitignore
README.md
.env
.nyc_output
coverage
.nyc_output
.vscode
.DS_Store
*.md

@@ -0,0 +1,31 @@
FROM node:18-alpine
# Set working directory
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install dependencies
RUN npm ci --only=production
# Copy application code
COPY . .
# Create non-root user
RUN addgroup -g 1001 -S nodejs
RUN adduser -S nodejs -u 1001
# Change ownership of the app directory
RUN chown -R nodejs:nodejs /app
USER nodejs
# Expose port
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD node healthcheck.js
# Start the application
CMD ["node", "server.js"]

@@ -0,0 +1,28 @@
const http = require('http');

const options = {
  hostname: 'localhost',
  port: process.env.PORT || 3000,
  path: '/health',
  method: 'GET',
  timeout: 2000
};

const request = http.request(options, (res) => {
  if (res.statusCode === 200) {
    process.exit(0);
  } else {
    process.exit(1);
  }
});

request.on('error', () => {
  process.exit(1);
});

request.on('timeout', () => {
  request.destroy();
  process.exit(1);
});

request.end();

website/k8s/backend.yaml (new file, 60 additions)

@@ -0,0 +1,60 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: florale-emotion-backend
  labels:
    app: florale-emotion-backend
spec:
  replicas: 2
  selector:
    matchLabels:
      app: florale-emotion-backend
  template:
    metadata:
      labels:
        app: florale-emotion-backend
    spec:
      imagePullSecrets:
        - name: harbor-registry-secret
      containers:
        - name: florale-emotion-backend
          image: registry.julianvollmer.de/florale-emotion/florale-emotion-backend:__IMAGE_TAG__
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: "production"
            - name: PORT
              value: "3000"
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "300m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 10
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: florale-emotion-backend-service
spec:
  selector:
    app: florale-emotion-backend
  ports:
    - protocol: TCP
      port: 3000
      targetPort: 3000
  type: ClusterIP

website/k8s/bot.yaml (new file, 46 additions)

@@ -0,0 +1,46 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: florale-emotion-bot
  labels:
    app: florale-emotion-bot
spec:
  replicas: 1
  selector:
    matchLabels:
      app: florale-emotion-bot
  template:
    metadata:
      labels:
        app: florale-emotion-bot
    spec:
      imagePullSecrets:
        - name: harbor-registry-secret
      containers:
        - name: florale-emotion-bot
          image: registry.julianvollmer.de/florale-emotion/florale-emotion-bot:__IMAGE_TAG__
          env:
            - name: NODE_ENV
              value: "production"
          resources:
            requests:
              memory: "128Mi"
              cpu: "50m"
            limits:
              memory: "256Mi"
              cpu: "100m"
          # The social media bot runs as a cron/scheduled task.
          # No health checks are needed since it is not an HTTP service.
---
apiVersion: v1
kind: Service
metadata:
  name: florale-emotion-bot-service
spec:
  selector:
    app: florale-emotion-bot
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
  type: ClusterIP

website/k8s/frontend.yaml (new file, 55 additions)

@@ -0,0 +1,55 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: florale-emotion-frontend
  labels:
    app: florale-emotion-frontend
spec:
  replicas: 2
  selector:
    matchLabels:
      app: florale-emotion-frontend
  template:
    metadata:
      labels:
        app: florale-emotion-frontend
    spec:
      imagePullSecrets:
        - name: harbor-registry-secret
      containers:
        - name: florale-emotion-frontend
          image: registry.julianvollmer.de/florale-emotion/florale-emotion-frontend:__IMAGE_TAG__
          ports:
            - containerPort: 80
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "200m"
          livenessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: florale-emotion-frontend-service
spec:
  selector:
    app: florale-emotion-frontend
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: ClusterIP

@@ -0,0 +1,37 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: florale-emotion-dev-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/redirect-to-https: "true"
spec:
  tls:
    - hosts:
        - dev.florale-emotion.de
        - api-dev.florale-emotion.de
      secretName: florale-emotion-dev-tls
  rules:
    - host: dev.florale-emotion.de
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: florale-emotion-frontend-service
                port:
                  number: 80
    - host: api-dev.florale-emotion.de
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: florale-emotion-backend-service
                port:
                  number: 3000

website/k8s/ingress.yaml (new file, 48 additions)

@@ -0,0 +1,48 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: florale-emotion-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/redirect-to-https: "true"
spec:
  tls:
    - hosts:
        - florale-emotion.de
        - www.florale-emotion.de
        - api.florale-emotion.de
      secretName: florale-emotion-tls
  rules:
    - host: florale-emotion.de
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: florale-emotion-frontend-service
                port:
                  number: 80
    - host: www.florale-emotion.de
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: florale-emotion-frontend-service
                port:
                  number: 80
    - host: api.florale-emotion.de
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: florale-emotion-backend-service
                port:
                  number: 3000

website/nginx.conf (new file, 48 additions)

@@ -0,0 +1,48 @@
server {
    listen 80;
    server_name localhost;

    root /usr/share/nginx/html;
    index index.html;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/json
        application/javascript
        application/xml+rss
        application/atom+xml
        image/svg+xml;

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header Referrer-Policy "no-referrer-when-downgrade" always;
    add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;

    # Handle Angular routing
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Cache static assets
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}

@@ -0,0 +1,12 @@
node_modules
npm-debug.log
.git
.gitignore
README.md
.env
.nyc_output
coverage
.nyc_output
.vscode
.DS_Store
*.md

@@ -0,0 +1,27 @@
FROM node:18-alpine
# Set working directory
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install dependencies
RUN npm ci --only=production
# Copy application code
COPY . .
# Create non-root user
RUN addgroup -g 1001 -S nodejs
RUN adduser -S nodejs -u 1001
# Change ownership of the app directory
RUN chown -R nodejs:nodejs /app
USER nodejs
# Expose port (for health checks)
EXPOSE 8080
# Start the application
CMD ["node", "bot.js"]