Skip to content

SeaweedFS (HA) Installation

This content is not available in your language yet.

SeaweedFS (HA) is the production-grade, highly available instance of SeaweedFS. It utilizes external PostgreSQL for metadata storage and runs multiple replicas for each component.

  1. Connect to 🟢 Management Kubernetes Cluster (i.e., with the Kubeconfig file).

    Ensure you have defined and loaded your Global Shell Variables as described in Shell Variables.

    Terminal window
    # Load the global shell variables defined for the management cluster
    source $HOME/opstella-installation/shell-values/kubernetes/management_cluster.vars.sh
    Terminal window
    # Point kubectl/helm at the management cluster for all following steps
    export KUBECONFIG="$HOME/opstella-installation/kubeconfigs/management_cluster.yaml"
  2. Export Required Shell Variables

    Ensure K8S_INGRESS_TLS_CERTIFICATE_SECRET_NAME and K8S_STORAGECLASS_NAME are defined as per the Shell Variables guide.

    Terminal window
    # PostgreSQL Backend Passwords
    export SEAWEEDFS_HA_POSTGRES_SUPERUSER_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_POSTGRES_USER_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_POSTGRES_BACKUP_PASSWORD="CHANGEME" # Match cnpg-backup password from Bootstrap
    # S3 User Passwords (one credential per consuming service)
    export SEAWEEDFS_HA_S3_ADMIN_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_VAULT_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_HARBOR_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_OPSTELLA_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_GITLAB_CI_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_GITLAB_BACKUP_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_VELERO_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_GRAFANA_MIMIR_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_GRAFANA_LOKI_PASSWORD="CHANGEME"
    export SEAWEEDFS_HA_S3_GRAFANA_TEMPO_PASSWORD="CHANGEME"
    # Domain Names (BASE_DOMAIN comes from the vars file sourced in step 1)
    export SEAWEEDFS_HA_FILER_DOMAIN="seaweedfs.${BASE_DOMAIN}"
    export SEAWEEDFS_HA_API_DOMAIN="seaweedfs-s3.${BASE_DOMAIN}"
    export SEAWEEDFS_HA_ADMIN_DOMAIN="seaweedfs-admin.${BASE_DOMAIN}"
    # Admin Credentials (SeaweedFS Admin UI login)
    export SEAWEEDFS_HA_ADMIN_PASSWORD="CHANGEME"
  3. Create Namespace for SeaweedFS HA

    Terminal window
    # Idempotent namespace creation: plain "kubectl create" fails if the
    # namespace already exists; this form is safe to re-run.
    kubectl create namespace apps-supporting-services --dry-run=client -o yaml | kubectl apply -f -
  4. Install PostgreSQL Backend (CloudnativePG)

    Apply the PostgreSQL cluster manifest to serve as the SeaweedFS metadata backend.

    Terminal window
    cat <<EOF > $HOME/opstella-installation/kubernetes-manifests/seaweedfs-ha-postgres.yaml
    ---
    # Postgres Superuser (root) Credentials
    apiVersion: v1
    kind: Secret
    type: kubernetes.io/basic-auth
    metadata:
      name: seaweedfs-postgres-superuser
      namespace: apps-supporting-services
    stringData:
      username: postgres
      password: "${SEAWEEDFS_HA_POSTGRES_SUPERUSER_PASSWORD}"
    ---
    # SeaweedFS Application User Credentials
    apiVersion: v1
    kind: Secret
    type: kubernetes.io/basic-auth
    metadata:
      name: seaweedfs-postgres-user
      namespace: apps-supporting-services
    stringData:
      username: seaweedfs
      password: "${SEAWEEDFS_HA_POSTGRES_USER_PASSWORD}"
    ---
    # S3 Credentials for Postgres Backups (to SeaweedFS Minimal)
    apiVersion: v1
    kind: Secret
    type: Opaque
    metadata:
      name: seaweedfs-postgres-s3-secret
      namespace: apps-supporting-services
    stringData:
      S3_ACCESS_KEY: "cnpg-backup"
      S3_SECRET_KEY: "${SEAWEEDFS_HA_S3_POSTGRES_BACKUP_PASSWORD}"
    ---
    apiVersion: barmancloud.cnpg.io/v1
    kind: ObjectStore
    metadata:
      name: seaweedfs-postgres-backup
      namespace: apps-supporting-services
    spec:
      # This resource defines the backup destination for the Barman Cloud Plugin.
      # NOTE(review): destinationPath uses bucket "postgres-backup" (singular) on
      # the bootstrap instance, while the HA provisioning job later creates
      # "postgres-backups" (plural) — confirm the bucket name matches the target.
      configuration:
        destinationPath: s3://postgres-backup/
        endpointURL: http://seaweedfs-s3.seaweedfs-bootstrap.svc:9000
        s3Credentials:
          accessKeyId:
            name: seaweedfs-postgres-s3-secret
            key: S3_ACCESS_KEY
          secretAccessKey:
            name: seaweedfs-postgres-s3-secret
            key: S3_SECRET_KEY
        wal:
          compression: gzip
        data:
          compression: gzip
      # Retention policy for backups and WALs handled by the plugin
      retentionPolicy: "30d"
    ---
    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: seaweedfs-postgres
      namespace: apps-supporting-services
      labels:
        app.kubernetes.io/name: seaweedfs-postgres
        app.kubernetes.io/part-of: seaweedfs
    spec:
      instances: 1
      # Image configuration (Postgres 15)
      imageName: ghcr.io/cloudnative-pg/postgresql:15
      # Enable the Barman Cloud Plugin for backups
      plugins:
        - name: barman-cloud.cloudnative-pg.io
          isWALArchiver: true
          parameters:
            serverName: "seaweedfs-postgres"
            barmanObjectName: "seaweedfs-postgres-backup"
      # Storage Configuration
      storage:
        size: 5Gi
        storageClass: "${K8S_STORAGECLASS_NAME}"
      walStorage:
        size: 2Gi
        storageClass: "${K8S_STORAGECLASS_NAME}"
      # Bootstrap Configuration
      bootstrap:
        initdb:
          database: seaweedfs_filer
          owner: seaweedfs
          # Secret containing password for the SeaweedFS application user
          secret:
            name: seaweedfs-postgres-user
          postInitApplicationSQL:
            - |
              CREATE TABLE IF NOT EXISTS filemeta (
                dirhash BIGINT,
                name VARCHAR(65535),
                directory VARCHAR(65535),
                meta bytea,
                PRIMARY KEY (dirhash, name)
              );
              ALTER TABLE filemeta OWNER TO seaweedfs;
      # Superuser credentials (root/postgres)
      superuserSecret:
        name: seaweedfs-postgres-superuser
      # Backup Configuration (Barman Cloud Plugin will automatically detect matching ObjectStore)
      backup: {}
      # Monitoring (disabled for now)
      monitoring:
        enablePodMonitor: false
      # Resources
      resources:
        requests:
          memory: "256Mi"
          cpu: "200m"
        limits:
          memory: "1Gi"
          cpu: "1000m"
    EOF
    Terminal window
    # Use the full path the manifest was written to; the bare filename only
    # works if the current directory happens to be kubernetes-manifests/.
    kubectl apply -f $HOME/opstella-installation/kubernetes-manifests/seaweedfs-ha-postgres.yaml
  5. Create SeaweedFS S3 Configuration

    Terminal window
    cat <<EOF > $HOME/opstella-installation/kubernetes-manifests/seaweedfs-ha-s3.yaml
    ---
    # S3 Credentials for SeaweedFS (HA)
    apiVersion: v1
    kind: Secret
    type: Opaque
    metadata:
      name: seaweedfs-s3-secret
      namespace: apps-supporting-services
    stringData:
      # ----------------------------------------------------------------------------
      # INPUT CONFIGURATION (YAML)
      # Run 'scripts/seaweedfs-utils.sh update <this_file>' to generate the JSON below.
      # ----------------------------------------------------------------------------
      s3_config_input: |
        users:
          - name: admin
            password: "${SEAWEEDFS_HA_S3_ADMIN_PASSWORD}"
            actions:
              - Admin
              - Read
              - Write
              - List
              - Tagging
          - name: postgres-backup
            password: "${SEAWEEDFS_HA_S3_POSTGRES_BACKUP_PASSWORD}"
            actions:
              - "Read:postgres-backups"
              - "Write:postgres-backups"
              - "List:postgres-backups"
              - "Tagging:postgres-backups"
          - name: vault
            password: "${SEAWEEDFS_HA_S3_VAULT_PASSWORD}"
            actions:
              - "Read:vault"
              - "Write:vault"
              - "List:vault"
              - "Tagging:vault"
          - name: harbor
            password: "${SEAWEEDFS_HA_S3_HARBOR_PASSWORD}"
            actions:
              - "Read:harbor"
              - "Write:harbor"
              - "List:harbor"
              - "Tagging:harbor"
          - name: opstella
            password: "${SEAWEEDFS_HA_S3_OPSTELLA_PASSWORD}"
            actions:
              - "Read:opstella-web"
              - "Write:opstella-web"
              - "List:opstella-web"
              - "Tagging:opstella-web"
          - name: gitlab-ci
            password: "${SEAWEEDFS_HA_S3_GITLAB_CI_PASSWORD}"
            actions:
              - "Read:gitlab-ci-caches"
              - "Write:gitlab-ci-caches"
              - "List:gitlab-ci-caches"
              - "Tagging:gitlab-ci-caches"
          - name: gitlab-backup
            password: "${SEAWEEDFS_HA_S3_GITLAB_BACKUP_PASSWORD}"
            actions:
              - "Read:gitlab-backups"
              - "Write:gitlab-backups"
              - "List:gitlab-backups"
              - "Tagging:gitlab-backups"
          - name: velero
            password: "${SEAWEEDFS_HA_S3_VELERO_PASSWORD}"
            actions:
              - "Read:k8s-velero-backups"
              - "Write:k8s-velero-backups"
              - "List:k8s-velero-backups"
              - "Tagging:k8s-velero-backups"
          - name: grafana-mimir
            password: "${SEAWEEDFS_HA_S3_GRAFANA_MIMIR_PASSWORD}"
            actions:
              - "Read:grafana-mimir-tsdb"
              - "Write:grafana-mimir-tsdb"
              - "List:grafana-mimir-tsdb"
              - "Tagging:grafana-mimir-tsdb"
              - "Read:grafana-mimir-alertmanager"
              - "Write:grafana-mimir-alertmanager"
              - "List:grafana-mimir-alertmanager"
              - "Tagging:grafana-mimir-alertmanager"
              - "Read:grafana-mimir-ruler"
              - "Write:grafana-mimir-ruler"
              - "List:grafana-mimir-ruler"
              - "Tagging:grafana-mimir-ruler"
          - name: grafana-loki
            password: "${SEAWEEDFS_HA_S3_GRAFANA_LOKI_PASSWORD}"
            actions:
              - "Read:grafana-loki-chunks"
              - "Write:grafana-loki-chunks"
              - "List:grafana-loki-chunks"
              - "Tagging:grafana-loki-chunks"
              - "Read:grafana-loki-ruler"
              - "Write:grafana-loki-ruler"
              - "List:grafana-loki-ruler"
              - "Tagging:grafana-loki-ruler"
          - name: grafana-tempo
            password: "${SEAWEEDFS_HA_S3_GRAFANA_TEMPO_PASSWORD}"
            actions:
              - "Read:grafana-tempo-tsdb"
              - "Write:grafana-tempo-tsdb"
              - "List:grafana-tempo-tsdb"
              - "Tagging:grafana-tempo-tsdb"
      # ----------------------------------------------------------------------------
      # GENERATED CONFIGURATION (JSON) - DO NOT EDIT MANUALLY
      # ----------------------------------------------------------------------------
      seaweedfs_s3_config: ""
    EOF

    Generate the required JSON credentials:

    Terminal window
    # Pass the manifest by full path, consistent with how it was written above;
    # the original relative path only resolves from the installation root.
    $HOME/opstella-installation/assets/scripts/seaweedfs-utils.sh update $HOME/opstella-installation/kubernetes-manifests/seaweedfs-ha-s3.yaml

    Apply the configuration:

    Terminal window
    # Use the full path the manifest was written to; the bare filename only
    # works if the current directory happens to be kubernetes-manifests/.
    kubectl apply -f $HOME/opstella-installation/kubernetes-manifests/seaweedfs-ha-s3.yaml
  6. Add SeaweedFS Helm Repository (If not already added)

    Terminal window
    # Register the official SeaweedFS chart repository and refresh the index
    helm repo add seaweedfs https://seaweedfs.github.io/seaweedfs/helm
    helm repo update
  7. Create SeaweedFS HA Helm Values

    Terminal window
    cat <<EOF > $HOME/opstella-installation/helm-values/seaweedfs-ha-values.yaml
    global:
      createClusterRole: false # created by seaweedfs-bootstrap
      imageName: chrislusf/seaweedfs
      loggingLevel: 1
      enableSecurity: false
      # Disable global replication (Relies on Longhorn)
      enableReplication: false
      # "000" = No replication at SeaweedFS level
      replicationPlacement: "000"
    master:
      enabled: true
      replicas: 3
      # Default volume size is 1GB (1000MB) in this chart, fitting ~10 volumes in 10Gi
      volumeSizeLimitMB: 1000
      # Ensure masters are on different nodes
      affinity: |
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
                  app.kubernetes.io/component: master
              topologyKey: kubernetes.io/hostname
      # Security Context
      podSecurityContext:
        enabled: true
        fsGroup: 1000
      containerSecurityContext:
        enabled: true
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        privileged: false
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop: ["ALL"]
      # Persistence for Master (Metadata/Sequence)
      data:
        type: "persistentVolumeClaim"
        size: "5Gi"
        storageClass: "${K8S_STORAGECLASS_NAME}"
      logs:
        type: "emptyDir"
        size: "1Gi"
    volume:
      enabled: true
      replicas: 3
      ipBind: "0.0.0.0"
      minFreeSpacePercent: 5
      # Ensure volume servers are on different nodes
      affinity: |
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
                  app.kubernetes.io/component: volume
              topologyKey: kubernetes.io/hostname
      # Security Context
      podSecurityContext:
        enabled: true
        fsGroup: 1000
      containerSecurityContext:
        enabled: true
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        privileged: false
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop: ["ALL"]
      # Persistence for Volume (Data)
      dataDirs:
        - name: data
          type: "persistentVolumeClaim"
          size: "10Gi"
          storageClass: "${K8S_STORAGECLASS_NAME}"
          maxVolumes: 10
      logs:
        type: "emptyDir"
        size: "1Gi"
    filer:
      enabled: true
      replicas: 3
      port: 9001 # MinIO Console compatibility
      # Ensure filers are on different nodes
      affinity: |
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
                  app.kubernetes.io/component: filer
              topologyKey: kubernetes.io/hostname
      # Security Context
      podSecurityContext:
        enabled: true
        fsGroup: 1000
      containerSecurityContext:
        enabled: true
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        privileged: false
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop: ["ALL"]
      # Disable local LevelDB (incompatible with HA); store filer metadata in Postgres
      extraEnvironmentVars:
        WEED_LEVELDB2_ENABLED: "false"
        WEED_POSTGRES_ENABLED: "true"
        WEED_POSTGRES_HOSTNAME: "seaweedfs-postgres-rw.apps-supporting-services.svc"
        WEED_POSTGRES_PORT: "5432"
        WEED_POSTGRES_DATABASE: "seaweedfs_filer"
        WEED_FILER_BUCKETS_FOLDER: "/buckets"
        WEED_POSTGRES_USERNAME: "seaweedfs"
        WEED_POSTGRES_PASSWORD: "${SEAWEEDFS_HA_POSTGRES_USER_PASSWORD}"
      secretExtraEnvironmentVars: {}
      # Persistence for Filer (Metadata)
      # Filer stores metadata in Postgres, so local PVC is just for logs/buffers
      data:
        type: "persistentVolumeClaim"
        size: "1Gi"
        storageClass: "${K8S_STORAGECLASS_NAME}"
      logs:
        type: "emptyDir"
        size: "1Gi"
      # Ingress for Filer (SeaweedFS WebUI)
      ingress:
        enabled: false
        className: nginx
        host: "${SEAWEEDFS_HA_FILER_DOMAIN}"
        path: "/"
        pathType: Prefix
        annotations:
          nginx.ingress.kubernetes.io/proxy-body-size: "0"
          ingress.kubernetes.io/proxy-body-size: "0"
          # Basic Auth Configuration
          # nginx.ingress.kubernetes.io/auth-type: basic
          # nginx.ingress.kubernetes.io/auth-secret: seaweedfs-filer-basic-auth
          # nginx.ingress.kubernetes.io/auth-realm: "Authentication Required"
        tls:
          - secretName: "${K8S_INGRESS_TLS_CERTIFICATE_SECRET_NAME}"
            hosts:
              - "${SEAWEEDFS_HA_FILER_DOMAIN}"
    s3:
      enabled: true
      replicas: 2
      port: 9000 # MinIO API compatibility
      enableAuth: true
      existingConfigSecret: seaweedfs-s3-secret
      affinity: |
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
                  app.kubernetes.io/component: s3
              topologyKey: kubernetes.io/hostname
      # Security Context
      podSecurityContext:
        enabled: true
        fsGroup: 1000
      containerSecurityContext:
        enabled: true
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        privileged: false
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop: ["ALL"]
      logs:
        type: "emptyDir"
        size: "1Gi"
      # Ingress for S3 API
      ingress:
        enabled: true
        className: nginx
        host: "${SEAWEEDFS_HA_API_DOMAIN}"
        path: "/"
        pathType: Prefix
        annotations:
          nginx.ingress.kubernetes.io/proxy-body-size: "0"
          ingress.kubernetes.io/proxy-body-size: "0"
        tls:
          - secretName: "${K8S_INGRESS_TLS_CERTIFICATE_SECRET_NAME}"
            hosts:
              - "${SEAWEEDFS_HA_API_DOMAIN}"
    admin:
      enabled: true
      secret:
        adminUser: "admin"
        adminPassword: "${SEAWEEDFS_HA_ADMIN_PASSWORD}"
      ingress:
        enabled: true
        className: nginx
        host: "${SEAWEEDFS_HA_ADMIN_DOMAIN}"
        path: "/"
        pathType: Prefix
        tls:
          - secretName: "${K8S_INGRESS_TLS_CERTIFICATE_SECRET_NAME}"
            hosts:
              - "${SEAWEEDFS_HA_ADMIN_DOMAIN}"
      # Security Context
      podSecurityContext:
        enabled: true
        fsGroup: 1000
      containerSecurityContext:
        enabled: true
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        privileged: false
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop: ["ALL"]
    worker:
      enabled: true
      adminServer: "seaweedfs-admin.apps-supporting-services.svc:23646"
      podSecurityContext:
        enabled: true
        fsGroup: 1000
      containerSecurityContext:
        enabled: true
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        privileged: false
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop: ["ALL"]
    EOF
  8. Install SeaweedFS HA Helm Release

    Terminal window
    # Install (or upgrade) the HA release; chart version pinned for reproducibility
    helm upgrade --install seaweedfs seaweedfs/seaweedfs \
    --version 4.0.407 \
    --namespace apps-supporting-services \
    -f $HOME/opstella-installation/helm-values/seaweedfs-ha-values.yaml
  9. Provision SeaweedFS Buckets

    Terminal window
    cat <<EOF > $HOME/opstella-installation/kubernetes-manifests/seaweedfs-ha-job-provisioning.yaml
    apiVersion: batch/v1
    kind: Job
    metadata:
      # NOTE(review): name/labels say "bootstrap" although this job targets the
      # HA instance in apps-supporting-services — confirm naming is intentional.
      name: seaweedfs-bootstrap-provisioning
      namespace: apps-supporting-services
      labels:
        app.kubernetes.io/name: seaweedfs-bootstrap
        app.kubernetes.io/component: provisioning
    spec:
      # Garbage-collect the finished Job after 5 minutes
      ttlSecondsAfterFinished: 300
      template:
        metadata:
          labels:
            app.kubernetes.io/name: seaweedfs-bootstrap
            app.kubernetes.io/component: provisioning
        spec:
          restartPolicy: OnFailure
          # Security Context
          securityContext:
            runAsNonRoot: true
            runAsUser: 10000
            fsGroup: 10000
            seccompProfile:
              type: RuntimeDefault
          containers:
            - name: provisioner
              image: chrislusf/seaweedfs:4.07
              # Security Context
              securityContext:
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: false
                runAsNonRoot: true
                runAsUser: 10000
                capabilities:
                  drop: ["ALL"]
              # Resource Limits
              resources:
                requests:
                  memory: "64Mi"
                  cpu: "100m"
                limits:
                  memory: "128Mi"
                  cpu: "200m"
              env:
                # Target SeaweedFS Master (HA instance)
                - name: WEED_MASTER
                  value: "seaweedfs-master:9333"
                # Target SeaweedFS Filer (HA instance)
                - name: WEED_FILER
                  value: "seaweedfs-filer:9001"
                # Comma-separated list of buckets to create
                - name: SEAWEEDFS_BUCKETS
                  value: "opstella-web,postgres-backups,harbor,vault,k8s-velero-backups,gitlab-backups,gitlab-ci-caches,grafana-loki-chunks,grafana-loki-ruler,grafana-mimir-tsdb,grafana-mimir-ruler,grafana-mimir-alertmanager,grafana-tempo-tsdb"
              command:
                - "/bin/sh"
                - "-c"
                - |
                  set -e
                  echo "[INFO] Starting SeaweedFS Bootstrap Provisioning..."
                  echo "[INFO] Target Master: \$WEED_MASTER"
                  echo "[INFO] Target Filer: \$WEED_FILER"
                  echo "[INFO] Buckets: \$SEAWEEDFS_BUCKETS"

                  # 1. Wait for Master Leader
                  echo "[INFO] Waiting for SeaweedFS Master Leader..."
                  START_TIME=\$(date +%s)
                  TIMEOUT=300
                  until echo cluster.ps | weed shell -master=\$WEED_MASTER > /dev/null 2>&1; do
                    CURRENT_TIME=\$(date +%s)
                    ELAPSED_TIME=\$((\$CURRENT_TIME - \$START_TIME))
                    if [ \$ELAPSED_TIME -gt \$TIMEOUT ]; then
                      echo "[ERROR] Timeout waiting for SeaweedFS Master at \$WEED_MASTER"
                      exit 1
                    fi
                    echo "[WAIT] Connecting to master... (\${ELAPSED_TIME}s)"
                    sleep 5
                  done
                  echo "[INFO] Connected to Master."

                  # 2. Wait for Filer (required for bucket creation)
                  echo "[INFO] Waiting for SeaweedFS Filer..."
                  until echo fs.ls / | weed shell -master=\$WEED_MASTER -filer=\$WEED_FILER > /dev/null 2>&1; do
                    CURRENT_TIME=\$(date +%s)
                    ELAPSED_TIME=\$((\$CURRENT_TIME - \$START_TIME))
                    if [ \$ELAPSED_TIME -gt \$TIMEOUT ]; then
                      echo "[ERROR] Timeout waiting for SeaweedFS Filer at \$WEED_FILER"
                      exit 1
                    fi
                    echo "[WAIT] Connecting to filer... (\${ELAPSED_TIME}s)"
                    sleep 5
                  done
                  echo "[INFO] Connected to Filer."

                  # 3. Create Buckets
                  # Generate weed shell script
                  SCRIPT_FILE="/tmp/provision.weed"
                  echo "# Auto-generated provisioning script" > \$SCRIPT_FILE
                  # POSIX compliant way to split comma-separated string
                  echo "\$SEAWEEDFS_BUCKETS" | tr ',' '\n' | while read -r bucket; do
                    # Trim whitespace
                    bucket=\$(echo "\$bucket" | sed 's/^[[:space:]]*//;s/[[:space:]]*\$//')
                    if [ -n "\$bucket" ]; then
                      echo "s3.bucket.create -name \$bucket" >> \$SCRIPT_FILE
                      echo "[INFO] Added bucket creation command for: \$bucket"
                    fi
                  done
                  echo "s3.bucket.list" >> \$SCRIPT_FILE

                  # Execute
                  echo "[INFO] Executing provisioning commands:"
                  cat \$SCRIPT_FILE
                  echo "----------------------------------------"
                  # Run weed shell with better error handling
                  OUTPUT_FILE="/tmp/output.log"
                  if ! cat \$SCRIPT_FILE | weed shell -master=\$WEED_MASTER -filer=\$WEED_FILER 2>&1 | tee \$OUTPUT_FILE; then
                    # Check if error is just "bucket already exists"
                    if grep -qi "already exist\|AlreadyExists" \$OUTPUT_FILE; then
                      echo "[WARN] Some buckets already exist (expected, continuing...)"
                    else
                      echo "[ERROR] Bucket creation failed with unexpected error:"
                      cat \$OUTPUT_FILE
                      exit 1
                    fi
                  fi
                  echo "[INFO] Bucket creation completed."

                  # 4. Verify bucket creation
                  echo "[INFO] Verifying bucket creation..."
                  BUCKET_LIST=\$(echo s3.bucket.list | weed shell -master=\$WEED_MASTER -filer=\$WEED_FILER 2>&1)
                  echo "\$SEAWEEDFS_BUCKETS" | tr ',' '\n' | while read -r bucket; do
                    bucket=\$(echo "\$bucket" | sed 's/^[[:space:]]*//;s/[[:space:]]*\$//')
                    if [ -n "\$bucket" ]; then
                      if echo "\$BUCKET_LIST" | grep -q "\$bucket"; then
                        echo "[SUCCESS] Bucket '\$bucket' exists"
                      else
                        echo "[ERROR] Bucket '\$bucket' NOT found!"
                        echo "[DEBUG] Available buckets:"
                        echo "\$BUCKET_LIST"
                        exit 1
                      fi
                    fi
                  done
                  echo "[SUCCESS] All buckets verified successfully!"
    EOF

    Apply the provisioning job:

    Terminal window
    # Use the full path the manifest was written to; the bare filename only
    # works if the current directory happens to be kubernetes-manifests/.
    kubectl apply -f $HOME/opstella-installation/kubernetes-manifests/seaweedfs-ha-job-provisioning.yaml
  10. Verify Pod Status

    Terminal window
    kubectl get pods -n apps-supporting-services

    💡 The following components should be Running:

    NAME READY STATUS RESTARTS AGE
    seaweedfs-admin-0 1/1 Running 0 ...
    seaweedfs-filer-0 1/1 Running 0 ...
    seaweedfs-filer-1 1/1 Running 0 ...
    seaweedfs-filer-2 1/1 Running 0 ...
    seaweedfs-master-0 1/1 Running 0 ...
    seaweedfs-master-1 1/1 Running 0 ...
    seaweedfs-master-2 1/1 Running 0 ...
    seaweedfs-postgres-1 1/1 Running 0 ...
    seaweedfs-s3-XXXXXXXXXX-YYYYY 1/1 Running 0 ...
    seaweedfs-s3-XXXXXXXXXX-ZZZZZ 1/1 Running 0 ...
    seaweedfs-volume-0 1/1 Running 0 ...
    seaweedfs-volume-1 1/1 Running 0 ...
    seaweedfs-volume-2 1/1 Running 0 ...
    seaweedfs-worker-XXXXXXXXXX-YYYYY 1/1 Running 0 ...
  11. Verify High Availability

    • Access the SeaweedFS Admin UI to confirm the cluster status.
    • Verify that the S3 API is responding correctly via the HA endpoint.

Finished?

Use the below navigation to proceed