From 42a2fd74ea9409b9550834c6602f4222fd1412de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Zsolt=20Kacs=C3=A1ndi?= Date: Fri, 9 Aug 2024 17:04:05 +0200 Subject: [PATCH] chore(ui): use nginx-unprivileged base image for the UI (#1983) * chore(ui): use nginx-unprivileged base image for the UI * chore(ui): enable podSecurityContext and containerSecurityContext for the UI * docs(helm): update Helm docs * chore(ui): use nginx-unprivileged base image for the UI * chore(gcp): run gen-bicep --- installation/aws/VmClarity.cfn | 2 +- installation/azure/vmclarity-install.sh | 2 +- installation/azure/vmclarity.json | 6 +++--- installation/docker/gateway.conf | 2 +- installation/gcp/dm/components/vmclarity-install.sh | 2 +- installation/kubernetes/helm/vmclarity/README.md | 4 ++-- .../helm/vmclarity/templates/gateway/configmap.yaml | 2 +- .../kubernetes/helm/vmclarity/templates/ui/deployment.yaml | 4 ++++ installation/kubernetes/helm/vmclarity/values.yaml | 4 ++-- ui/Dockerfile | 2 +- ui/nginx.conf | 4 ++-- 11 files changed, 19 insertions(+), 15 deletions(-) diff --git a/installation/aws/VmClarity.cfn b/installation/aws/VmClarity.cfn index c6bdb5b50..c6539473f 100644 --- a/installation/aws/VmClarity.cfn +++ b/installation/aws/VmClarity.cfn @@ -612,7 +612,7 @@ Resources: http { upstream ui { - server ui:80; + server ui:8080; } upstream uibackend { diff --git a/installation/azure/vmclarity-install.sh b/installation/azure/vmclarity-install.sh index efb932909..c4b4c3b37 100644 --- a/installation/azure/vmclarity-install.sh +++ b/installation/azure/vmclarity-install.sh @@ -421,7 +421,7 @@ events { http { upstream ui { - server ui:80; + server ui:8080; } upstream uibackend { diff --git a/installation/azure/vmclarity.json b/installation/azure/vmclarity.json index 5de836dc3..decad6ec6 100644 --- a/installation/azure/vmclarity.json +++ b/installation/azure/vmclarity.json @@ -5,7 +5,7 @@ "_generator": { "name": "bicep", "version": "0.29.47.4906", - "templateHash": "13927729476504818038" + "templateHash": "17757178513922425303" } }, "parameters": { @@ -577,7 +577,7 @@ "_generator": { "name": "bicep", "version": "0.29.47.4906", - "templateHash": "11450816723859992713" + "templateHash": "3933691059982936053" } }, "parameters": { @@ -843,7 +843,7 @@ "AZURE_SCANNER_STORAGE_ACCOUNT_NAME": "[variables('storageAccountName')]", "AZURE_SCANNER_STORAGE_CONTAINER_NAME": "[variables('snapshotContainerName')]" }, - "scriptTemplate": "#!/bin/bash\n\nset -euo pipefail\n\nmkdir -p /etc/vmclarity\nmkdir -p /opt/vmclarity\n\ncat << 'EOF' > /etc/vmclarity/deploy.sh\n#!/bin/bash\nset -euo pipefail\n\n# Install the latest version of docker from the offical\n# docker repository instead of the older version built into\n# ubuntu, so that we can use docker compose v2.\n#\n# To install this we need to add the docker apt repo gpg key\n# to the apt keyring, and then add the apt sources based on\n# our version of ubuntu. Then we can finally apt install all\n# the required docker components.\napt-get update\napt-get install -y ca-certificates curl gnupg\nmkdir -p /etc/apt/keyrings\nchmod 755 /etc/apt/keyrings\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --yes --dearmor -o /etc/apt/keyrings/docker.gpg\nchmod a+r /etc/apt/keyrings/docker.gpg\necho \\\n \"deb [arch=\"$(dpkg --print-architecture)\" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \\\n \"$(. 
/etc/os-release && echo \"$VERSION_CODENAME\")\" stable\" | \\\n sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\napt-get update\napt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin\n\nif [ \"__DatabaseToUse__\" == \"Postgresql\" ]; then\n # Configure the VMClarity backend to use the local postgres\n # service\n echo \"VMCLARITY_APISERVER_DATABASE_DRIVER=POSTGRES\" > /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_NAME=vmclarity\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_USER=vmclarity\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PASS=__PostgresDBPassword__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_HOST=postgresql\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PORT=5432\" >> /etc/vmclarity/apiserver.env\nelif [ \"__DatabaseToUse__\" == \"External Postgresql\" ]; then\n # Configure the VMClarity backend to use the postgres\n # database configured by the user.\n echo \"VMCLARITY_APISERVER_DATABASE_DRIVER=POSTGRES\" > /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_NAME=__ExternalDBName__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_USER=__ExternalDBUsername__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PASS=__ExternalDBPassword__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_HOST=__ExternalDBHost__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PORT=__ExternalDBPort__\" >> /etc/vmclarity/apiserver.env\nelif [ \"__DatabaseToUse__\" == \"SQLite\" ]; then\n # Configure the VMClarity backend to use the SQLite DB\n # driver and configure the storage location so that it\n # persists.\n echo \"VMCLARITY_APISERVER_DATABASE_DRIVER=LOCAL\" > /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_LOCAL_DB_PATH=/data/vmclarity.db\" >> /etc/vmclarity/apiserver.env\nfi\n\n# Replace anywhere in the config.env __CONTROLPLANE_HOST__\n# with the local ipv4 IP address of the VMClarity server.\nlocal_ip_address=\"$(curl -s -H Metadata:true --noproxy \"*\" \"http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2021-02-01&format=text\")\"\nsed -i \"s/__CONTROLPLANE_HOST__/${local_ip_address}/\" /etc/vmclarity/orchestrator.env\n\n# Reload the systemd daemon to ensure that the VMClarity unit\n# has been detected.\nsystemctl daemon-reload\n\n# Create directory required for grype-server\n/usr/bin/mkdir -p /opt/grype-server\n/usr/bin/chown -R 1000:1000 /opt/grype-server\n\n# Create directory required for vmclarity apiserver\n/usr/bin/mkdir -p /opt/vmclarity\n\n# Create directory for exploit db server\n/usr/bin/mkdir -p /opt/exploits\n\n# Create directory for trivy server\n/usr/bin/mkdir -p /opt/trivy-server\n\n# Create directory for yara rule server\n/usr/bin/mkdir -p /opt/yara-rule-server\n\n# Enable and start/restart VMClarity backend\nsystemctl enable vmclarity.service\nsystemctl restart vmclarity.service\n\n# Add admin user to docker group and activate the changes\nusermod -a -G docker __AdminUsername__\nEOF\nchmod 744 /etc/vmclarity/deploy.sh\n\ncat << 'EOF' > /etc/vmclarity/yara-rule-server.yaml\nenable_json_log: true\nrule_update_schedule: \"0 0 * * *\"\nrule_sources:\n - name: \"base\"\n url: \"https://github.com/Yara-Rules/rules/archive/refs/heads/master.zip\"\n exclude_regex: \".*index.*.yar|.*/utils/.*|.*/deprecated/.*|.*index_.*|.*MALW_AZORULT.yar\"\n - name: \"magic\"\n url: 
\"https://github.com/securitymagic/yara/archive/refs/heads/main.zip\"\n exclude_regex: \".*index.*.yar\"\nEOF\nchmod 644 /etc/vmclarity/yara-rule-server.yaml\n\ncat << 'EOF' > /etc/vmclarity/orchestrator.env\nVMCLARITY_ORCHESTRATOR_PROVIDER=Azure\nVMCLARITY_AZURE_SUBSCRIPTION_ID=__AZURE_SUBSCRIPTION_ID__\nVMCLARITY_AZURE_SCANNER_LOCATION=__AZURE_SCANNER_LOCATION__\nVMCLARITY_AZURE_SCANNER_RESOURCE_GROUP=__AZURE_SCANNER_RESOURCE_GROUP__\nVMCLARITY_AZURE_SCANNER_SUBNET_ID=__AZURE_SCANNER_SUBNET_ID__\nVMCLARITY_AZURE_SCANNER_PUBLIC_KEY=__AZURE_SCANNER_PUBLIC_KEY__\nVMCLARITY_AZURE_SCANNER_VM_SIZE=__AZURE_SCANNER_VM_SIZE__\nVMCLARITY_AZURE_SCANNER_IMAGE_PUBLISHER=__AZURE_SCANNER_IMAGE_PUBLISHER__\nVMCLARITY_AZURE_SCANNER_IMAGE_OFFER=__AZURE_SCANNER_IMAGE_OFFER__\nVMCLARITY_AZURE_SCANNER_IMAGE_SKU=__AZURE_SCANNER_IMAGE_SKU__\nVMCLARITY_AZURE_SCANNER_IMAGE_VERSION=__AZURE_SCANNER_IMAGE_VERSION__\nVMCLARITY_AZURE_SCANNER_SECURITY_GROUP=__AZURE_SCANNER_SECURITY_GROUP__\nVMCLARITY_AZURE_SCANNER_STORAGE_ACCOUNT_NAME=__AZURE_SCANNER_STORAGE_ACCOUNT_NAME__\nVMCLARITY_AZURE_SCANNER_STORAGE_CONTAINER_NAME=__AZURE_SCANNER_STORAGE_CONTAINER_NAME__\n\nVMCLARITY_ORCHESTRATOR_APISERVER_ADDRESS=http://apiserver:8888\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_CONTAINER_IMAGE=__ScannerContainerImage__\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_APISERVER_ADDRESS=http://__CONTROLPLANE_HOST__:8888\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_EXPLOITSDB_ADDRESS=http://__CONTROLPLANE_HOST__:1326\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_TRIVY_SERVER_ADDRESS=http://__CONTROLPLANE_HOST__:9992\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_GRYPE_SERVER_ADDRESS=__CONTROLPLANE_HOST__:9991\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_YARA_RULE_SERVER_ADDRESS=http://__CONTROLPLANE_HOST__:9993\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_DELETE_POLICY=__AssetScanDeletePolicy__\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_FRESHCLAM_MIRROR=http://__CONTROLPLANE_HOST__:1000/clamav\nEOF\nchmod 644 /etc/vmclarity/orchestrator.env\n\ncat << 'EOF' > /etc/vmclarity/vmclarity.yaml\nservices:\n apiserver:\n image: __APIServerContainerImage__\n command:\n - run\n - --log-level\n - info\n ports:\n - \"8888:8888\"\n env_file: ./apiserver.env\n volumes:\n - type: bind\n source: /opt/vmclarity\n target: /data\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8081/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n orchestrator:\n image: __OrchestratorContainerImage__\n command:\n - run\n - --log-level\n - info\n env_file: ./orchestrator.env\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n depends_on:\n apiserver:\n condition: service_healthy\n healthcheck:\n test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8082/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n ui:\n image: __UIContainerImage__\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n depends_on:\n apiserver:\n condition: service_healthy\n\n uibackend:\n image: __UIBackendContainerImage__\n command:\n - run\n - --log-level\n - info\n env_file: ./uibackend.env\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n depends_on:\n apiserver:\n condition: service_healthy\n healthcheck:\n 
test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8083/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n gateway:\n image: nginx\n ports:\n - \"80:80\"\n configs:\n - source: gateway_config\n target: /etc/nginx/nginx.conf\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n\n exploit-db-server:\n image: __ExploitDBServerContainerImage__\n ports:\n - \"1326:1326\"\n volumes:\n - type: bind\n source: /opt/exploits\n target: /vuls\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: [\"CMD\", \"nc\", \"-z\", \"127.0.0.1\", \"1326\"]\n interval: 10s\n retries: 60\n\n trivy-server:\n image: __TrivyServerContainerImage__\n command:\n - server\n ports:\n - \"9992:9992\"\n env_file: ./trivy-server.env\n volumes:\n - type: bind\n source: /opt/trivy-server\n target: /home/scanner/.cache\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: [\"CMD\", \"nc\", \"-z\", \"127.0.0.1\", \"9992\"]\n interval: 10s\n retries: 60\n\n grype-server:\n image: __GrypeServerContainerImage__\n command:\n - run\n - --log-level\n - warning\n ports:\n - \"9991:9991\"\n volumes:\n - type: bind\n source: /opt/grype-server\n target: /data\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: wget --no-verbose --tries=10 --spider http://127.0.0.1:8080/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n freshclam-mirror:\n image: __FreshclamMirrorContainerImage__\n ports:\n - \"1000:80\"\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n\n yara-rule-server:\n image: __YaraRuleServerContainerImage__\n command:\n - run\n ports:\n - \"9993:8080\"\n configs:\n - source: yara_rule_server_config\n target: /etc/yara-rule-server/config.yaml\n volumes:\n - type: bind\n source: /opt/yara-rule-server\n target: /var/lib/yara-rule-server\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8082/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n swagger-ui:\n image: swaggerapi/swagger-ui:v5.17.14\n environment:\n CONFIG_URL: /apidocs/swagger-config.json\n configs:\n - source: swagger_config\n target: /usr/share/nginx/html/swagger-config.json\n\nconfigs:\n gateway_config:\n file: ./gateway.conf\n swagger_config:\n file: ./swagger-config.json\n yara_rule_server_config:\n file: ./yara-rule-server.yaml\nEOF\n\ntouch /etc/vmclarity/vmclarity.override.yaml\n# shellcheck disable=SC2050\nif [ \"__DatabaseToUse__\" == \"Postgresql\" ]; then\n cat << 'EOF' > /etc/vmclarity/vmclarity.override.yaml\nservices:\n postgresql:\n image: __PostgresqlContainerImage__\n env_file: ./postgres.env\n ports:\n - \"5432:5432\"\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: [\"CMD-SHELL\", \"pg_isready -d vmclarity -U vmclarity\"]\n interval: 10s\n retries: 60\n\n apiserver:\n depends_on:\n postgresql:\n condition: service_healthy\nEOF\nfi\n\ncat << 'EOF' > /etc/vmclarity/swagger-config.json\n{\n \"urls\": [\n {\n \"name\": \"VMClarity API\",\n \"url\": \"/api/openapi.json\"\n }\n ]\n}\nEOF\nchmod 644 
/etc/vmclarity/swagger-config.json\n\ncat << 'EOF' > /etc/vmclarity/uibackend.env\n##\n## UIBackend configuration\n##\n# VMClarity API server address\nVMCLARITY_UIBACKEND_APISERVER_ADDRESS=http://apiserver:8888\nEOF\nchmod 644 /etc/vmclarity/uibackend.env\n\ncat << 'EOF' > /etc/vmclarity/trivy-server.env\nTRIVY_LISTEN=0.0.0.0:9992\nTRIVY_CACHE_DIR=/home/scanner/.cache/trivy\nEOF\nchmod 644 /etc/vmclarity/trivy-server.env\n\ncat << 'EOF' > /etc/vmclarity/postgres.env\nPOSTGRESQL_USERNAME=vmclarity\nPOSTGRESQL_PASSWORD=__PostgresDBPassword__\nPOSTGRESQL_DATABASE=vmclarity\nEOF\nchmod 644 /etc/vmclarity/postgres.env\n\ncat << 'EOF' > /etc/vmclarity/gateway.conf\nevents {\n worker_connections 1024;\n}\n\nhttp {\n upstream ui {\n server ui:80;\n }\n\n upstream uibackend {\n server uibackend:8890;\n }\n\n upstream apiserver {\n server apiserver:8888;\n }\n\n server {\n listen 80;\n absolute_redirect off;\n\n location / {\n proxy_pass http://ui/;\n }\n\n location /ui/api/ {\n proxy_pass http://uibackend/;\n }\n\n location /api/ {\n proxy_set_header X-Forwarded-Host $http_host;\n proxy_set_header X-Forwarded-Prefix /api;\n proxy_set_header X-Forwarded-Proto $scheme;\n proxy_pass http://apiserver/;\n }\n\n location /apidocs/ {\n proxy_pass http://swagger-ui:8080/;\n }\n }\n}\nEOF\nchmod 644 /etc/vmclarity/gateway.conf\n\ncat << 'EOF' > /lib/systemd/system/vmclarity.service\n[Unit]\nDescription=VmClarity\nAfter=docker.service\nRequires=docker.service\n\n[Service]\nTimeoutStartSec=0\nType=oneshot\nRemainAfterExit=true\nExecStart=/usr/bin/docker compose -p vmclarity -f /etc/vmclarity/vmclarity.yaml -f /etc/vmclarity/vmclarity.override.yaml up -d --wait --remove-orphans\nExecStop=/usr/bin/docker compose -p vmclarity -f /etc/vmclarity/vmclarity.yaml -f /etc/vmclarity/vmclarity.override.yaml down\n\n[Install]\nWantedBy=multi-user.target\nEOF\nchmod 644 /lib/systemd/system/vmclarity.service\n\n/etc/vmclarity/deploy.sh\n", + "scriptTemplate": "#!/bin/bash\n\nset -euo pipefail\n\nmkdir -p /etc/vmclarity\nmkdir -p /opt/vmclarity\n\ncat << 'EOF' > /etc/vmclarity/deploy.sh\n#!/bin/bash\nset -euo pipefail\n\n# Install the latest version of docker from the offical\n# docker repository instead of the older version built into\n# ubuntu, so that we can use docker compose v2.\n#\n# To install this we need to add the docker apt repo gpg key\n# to the apt keyring, and then add the apt sources based on\n# our version of ubuntu. Then we can finally apt install all\n# the required docker components.\napt-get update\napt-get install -y ca-certificates curl gnupg\nmkdir -p /etc/apt/keyrings\nchmod 755 /etc/apt/keyrings\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --yes --dearmor -o /etc/apt/keyrings/docker.gpg\nchmod a+r /etc/apt/keyrings/docker.gpg\necho \\\n \"deb [arch=\"$(dpkg --print-architecture)\" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \\\n \"$(. 
/etc/os-release && echo \"$VERSION_CODENAME\")\" stable\" | \\\n sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\napt-get update\napt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin\n\nif [ \"__DatabaseToUse__\" == \"Postgresql\" ]; then\n # Configure the VMClarity backend to use the local postgres\n # service\n echo \"VMCLARITY_APISERVER_DATABASE_DRIVER=POSTGRES\" > /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_NAME=vmclarity\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_USER=vmclarity\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PASS=__PostgresDBPassword__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_HOST=postgresql\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PORT=5432\" >> /etc/vmclarity/apiserver.env\nelif [ \"__DatabaseToUse__\" == \"External Postgresql\" ]; then\n # Configure the VMClarity backend to use the postgres\n # database configured by the user.\n echo \"VMCLARITY_APISERVER_DATABASE_DRIVER=POSTGRES\" > /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_NAME=__ExternalDBName__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_USER=__ExternalDBUsername__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PASS=__ExternalDBPassword__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_HOST=__ExternalDBHost__\" >> /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_DB_PORT=__ExternalDBPort__\" >> /etc/vmclarity/apiserver.env\nelif [ \"__DatabaseToUse__\" == \"SQLite\" ]; then\n # Configure the VMClarity backend to use the SQLite DB\n # driver and configure the storage location so that it\n # persists.\n echo \"VMCLARITY_APISERVER_DATABASE_DRIVER=LOCAL\" > /etc/vmclarity/apiserver.env\n echo \"VMCLARITY_APISERVER_LOCAL_DB_PATH=/data/vmclarity.db\" >> /etc/vmclarity/apiserver.env\nfi\n\n# Replace anywhere in the config.env __CONTROLPLANE_HOST__\n# with the local ipv4 IP address of the VMClarity server.\nlocal_ip_address=\"$(curl -s -H Metadata:true --noproxy \"*\" \"http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2021-02-01&format=text\")\"\nsed -i \"s/__CONTROLPLANE_HOST__/${local_ip_address}/\" /etc/vmclarity/orchestrator.env\n\n# Reload the systemd daemon to ensure that the VMClarity unit\n# has been detected.\nsystemctl daemon-reload\n\n# Create directory required for grype-server\n/usr/bin/mkdir -p /opt/grype-server\n/usr/bin/chown -R 1000:1000 /opt/grype-server\n\n# Create directory required for vmclarity apiserver\n/usr/bin/mkdir -p /opt/vmclarity\n\n# Create directory for exploit db server\n/usr/bin/mkdir -p /opt/exploits\n\n# Create directory for trivy server\n/usr/bin/mkdir -p /opt/trivy-server\n\n# Create directory for yara rule server\n/usr/bin/mkdir -p /opt/yara-rule-server\n\n# Enable and start/restart VMClarity backend\nsystemctl enable vmclarity.service\nsystemctl restart vmclarity.service\n\n# Add admin user to docker group and activate the changes\nusermod -a -G docker __AdminUsername__\nEOF\nchmod 744 /etc/vmclarity/deploy.sh\n\ncat << 'EOF' > /etc/vmclarity/yara-rule-server.yaml\nenable_json_log: true\nrule_update_schedule: \"0 0 * * *\"\nrule_sources:\n - name: \"base\"\n url: \"https://github.com/Yara-Rules/rules/archive/refs/heads/master.zip\"\n exclude_regex: \".*index.*.yar|.*/utils/.*|.*/deprecated/.*|.*index_.*|.*MALW_AZORULT.yar\"\n - name: \"magic\"\n url: 
\"https://github.com/securitymagic/yara/archive/refs/heads/main.zip\"\n exclude_regex: \".*index.*.yar\"\nEOF\nchmod 644 /etc/vmclarity/yara-rule-server.yaml\n\ncat << 'EOF' > /etc/vmclarity/orchestrator.env\nVMCLARITY_ORCHESTRATOR_PROVIDER=Azure\nVMCLARITY_AZURE_SUBSCRIPTION_ID=__AZURE_SUBSCRIPTION_ID__\nVMCLARITY_AZURE_SCANNER_LOCATION=__AZURE_SCANNER_LOCATION__\nVMCLARITY_AZURE_SCANNER_RESOURCE_GROUP=__AZURE_SCANNER_RESOURCE_GROUP__\nVMCLARITY_AZURE_SCANNER_SUBNET_ID=__AZURE_SCANNER_SUBNET_ID__\nVMCLARITY_AZURE_SCANNER_PUBLIC_KEY=__AZURE_SCANNER_PUBLIC_KEY__\nVMCLARITY_AZURE_SCANNER_VM_SIZE=__AZURE_SCANNER_VM_SIZE__\nVMCLARITY_AZURE_SCANNER_IMAGE_PUBLISHER=__AZURE_SCANNER_IMAGE_PUBLISHER__\nVMCLARITY_AZURE_SCANNER_IMAGE_OFFER=__AZURE_SCANNER_IMAGE_OFFER__\nVMCLARITY_AZURE_SCANNER_IMAGE_SKU=__AZURE_SCANNER_IMAGE_SKU__\nVMCLARITY_AZURE_SCANNER_IMAGE_VERSION=__AZURE_SCANNER_IMAGE_VERSION__\nVMCLARITY_AZURE_SCANNER_SECURITY_GROUP=__AZURE_SCANNER_SECURITY_GROUP__\nVMCLARITY_AZURE_SCANNER_STORAGE_ACCOUNT_NAME=__AZURE_SCANNER_STORAGE_ACCOUNT_NAME__\nVMCLARITY_AZURE_SCANNER_STORAGE_CONTAINER_NAME=__AZURE_SCANNER_STORAGE_CONTAINER_NAME__\n\nVMCLARITY_ORCHESTRATOR_APISERVER_ADDRESS=http://apiserver:8888\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_CONTAINER_IMAGE=__ScannerContainerImage__\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_APISERVER_ADDRESS=http://__CONTROLPLANE_HOST__:8888\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_EXPLOITSDB_ADDRESS=http://__CONTROLPLANE_HOST__:1326\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_TRIVY_SERVER_ADDRESS=http://__CONTROLPLANE_HOST__:9992\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_GRYPE_SERVER_ADDRESS=__CONTROLPLANE_HOST__:9991\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_YARA_RULE_SERVER_ADDRESS=http://__CONTROLPLANE_HOST__:9993\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_DELETE_POLICY=__AssetScanDeletePolicy__\nVMCLARITY_ORCHESTRATOR_ASSETSCAN_WATCHER_SCANNER_FRESHCLAM_MIRROR=http://__CONTROLPLANE_HOST__:1000/clamav\nEOF\nchmod 644 /etc/vmclarity/orchestrator.env\n\ncat << 'EOF' > /etc/vmclarity/vmclarity.yaml\nservices:\n apiserver:\n image: __APIServerContainerImage__\n command:\n - run\n - --log-level\n - info\n ports:\n - \"8888:8888\"\n env_file: ./apiserver.env\n volumes:\n - type: bind\n source: /opt/vmclarity\n target: /data\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8081/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n orchestrator:\n image: __OrchestratorContainerImage__\n command:\n - run\n - --log-level\n - info\n env_file: ./orchestrator.env\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n depends_on:\n apiserver:\n condition: service_healthy\n healthcheck:\n test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8082/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n ui:\n image: __UIContainerImage__\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n depends_on:\n apiserver:\n condition: service_healthy\n\n uibackend:\n image: __UIBackendContainerImage__\n command:\n - run\n - --log-level\n - info\n env_file: ./uibackend.env\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n depends_on:\n apiserver:\n condition: service_healthy\n healthcheck:\n 
test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8083/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n gateway:\n image: nginx\n ports:\n - \"80:80\"\n configs:\n - source: gateway_config\n target: /etc/nginx/nginx.conf\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n\n exploit-db-server:\n image: __ExploitDBServerContainerImage__\n ports:\n - \"1326:1326\"\n volumes:\n - type: bind\n source: /opt/exploits\n target: /vuls\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: [\"CMD\", \"nc\", \"-z\", \"127.0.0.1\", \"1326\"]\n interval: 10s\n retries: 60\n\n trivy-server:\n image: __TrivyServerContainerImage__\n command:\n - server\n ports:\n - \"9992:9992\"\n env_file: ./trivy-server.env\n volumes:\n - type: bind\n source: /opt/trivy-server\n target: /home/scanner/.cache\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: [\"CMD\", \"nc\", \"-z\", \"127.0.0.1\", \"9992\"]\n interval: 10s\n retries: 60\n\n grype-server:\n image: __GrypeServerContainerImage__\n command:\n - run\n - --log-level\n - warning\n ports:\n - \"9991:9991\"\n volumes:\n - type: bind\n source: /opt/grype-server\n target: /data\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: wget --no-verbose --tries=10 --spider http://127.0.0.1:8080/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n freshclam-mirror:\n image: __FreshclamMirrorContainerImage__\n ports:\n - \"1000:80\"\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n\n yara-rule-server:\n image: __YaraRuleServerContainerImage__\n command:\n - run\n ports:\n - \"9993:8080\"\n configs:\n - source: yara_rule_server_config\n target: /etc/yara-rule-server/config.yaml\n volumes:\n - type: bind\n source: /opt/yara-rule-server\n target: /var/lib/yara-rule-server\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: wget --no-verbose --tries=1 --spider http://127.0.0.1:8082/healthz/ready || exit 1\n interval: 10s\n retries: 60\n\n swagger-ui:\n image: swaggerapi/swagger-ui:v5.17.14\n environment:\n CONFIG_URL: /apidocs/swagger-config.json\n configs:\n - source: swagger_config\n target: /usr/share/nginx/html/swagger-config.json\n\nconfigs:\n gateway_config:\n file: ./gateway.conf\n swagger_config:\n file: ./swagger-config.json\n yara_rule_server_config:\n file: ./yara-rule-server.yaml\nEOF\n\ntouch /etc/vmclarity/vmclarity.override.yaml\n# shellcheck disable=SC2050\nif [ \"__DatabaseToUse__\" == \"Postgresql\" ]; then\n cat << 'EOF' > /etc/vmclarity/vmclarity.override.yaml\nservices:\n postgresql:\n image: __PostgresqlContainerImage__\n env_file: ./postgres.env\n ports:\n - \"5432:5432\"\n logging:\n driver: journald\n deploy:\n mode: replicated\n replicas: 1\n restart_policy:\n condition: on-failure\n healthcheck:\n test: [\"CMD-SHELL\", \"pg_isready -d vmclarity -U vmclarity\"]\n interval: 10s\n retries: 60\n\n apiserver:\n depends_on:\n postgresql:\n condition: service_healthy\nEOF\nfi\n\ncat << 'EOF' > /etc/vmclarity/swagger-config.json\n{\n \"urls\": [\n {\n \"name\": \"VMClarity API\",\n \"url\": \"/api/openapi.json\"\n }\n ]\n}\nEOF\nchmod 644 
/etc/vmclarity/swagger-config.json\n\ncat << 'EOF' > /etc/vmclarity/uibackend.env\n##\n## UIBackend configuration\n##\n# VMClarity API server address\nVMCLARITY_UIBACKEND_APISERVER_ADDRESS=http://apiserver:8888\nEOF\nchmod 644 /etc/vmclarity/uibackend.env\n\ncat << 'EOF' > /etc/vmclarity/trivy-server.env\nTRIVY_LISTEN=0.0.0.0:9992\nTRIVY_CACHE_DIR=/home/scanner/.cache/trivy\nEOF\nchmod 644 /etc/vmclarity/trivy-server.env\n\ncat << 'EOF' > /etc/vmclarity/postgres.env\nPOSTGRESQL_USERNAME=vmclarity\nPOSTGRESQL_PASSWORD=__PostgresDBPassword__\nPOSTGRESQL_DATABASE=vmclarity\nEOF\nchmod 644 /etc/vmclarity/postgres.env\n\ncat << 'EOF' > /etc/vmclarity/gateway.conf\nevents {\n worker_connections 1024;\n}\n\nhttp {\n upstream ui {\n server ui:8080;\n }\n\n upstream uibackend {\n server uibackend:8890;\n }\n\n upstream apiserver {\n server apiserver:8888;\n }\n\n server {\n listen 80;\n absolute_redirect off;\n\n location / {\n proxy_pass http://ui/;\n }\n\n location /ui/api/ {\n proxy_pass http://uibackend/;\n }\n\n location /api/ {\n proxy_set_header X-Forwarded-Host $http_host;\n proxy_set_header X-Forwarded-Prefix /api;\n proxy_set_header X-Forwarded-Proto $scheme;\n proxy_pass http://apiserver/;\n }\n\n location /apidocs/ {\n proxy_pass http://swagger-ui:8080/;\n }\n }\n}\nEOF\nchmod 644 /etc/vmclarity/gateway.conf\n\ncat << 'EOF' > /lib/systemd/system/vmclarity.service\n[Unit]\nDescription=VmClarity\nAfter=docker.service\nRequires=docker.service\n\n[Service]\nTimeoutStartSec=0\nType=oneshot\nRemainAfterExit=true\nExecStart=/usr/bin/docker compose -p vmclarity -f /etc/vmclarity/vmclarity.yaml -f /etc/vmclarity/vmclarity.override.yaml up -d --wait --remove-orphans\nExecStop=/usr/bin/docker compose -p vmclarity -f /etc/vmclarity/vmclarity.yaml -f /etc/vmclarity/vmclarity.override.yaml down\n\n[Install]\nWantedBy=multi-user.target\nEOF\nchmod 644 /lib/systemd/system/vmclarity.service\n\n/etc/vmclarity/deploy.sh\n", "renderedScript": "[reduce(items(variables('params')), createObject('value', variables('scriptTemplate')), lambda('curr', 'next', createObject('value', replace(lambdaVariables('curr').value, format('__{0}__', lambdaVariables('next').key), lambdaVariables('next').value)))).value]", "osDiskType": "StandardSSD_LRS", "linuxConfiguration": { diff --git a/installation/docker/gateway.conf b/installation/docker/gateway.conf index 03f4730e4..388dd699b 100644 --- a/installation/docker/gateway.conf +++ b/installation/docker/gateway.conf @@ -1,5 +1,5 @@ upstream ui { - server ui:80; + server ui:8080; } upstream uibackend { diff --git a/installation/gcp/dm/components/vmclarity-install.sh b/installation/gcp/dm/components/vmclarity-install.sh index 4fbf05c67..a9af2441d 100644 --- a/installation/gcp/dm/components/vmclarity-install.sh +++ b/installation/gcp/dm/components/vmclarity-install.sh @@ -429,7 +429,7 @@ events {{ http {{ upstream ui {{ - server ui:80; + server ui:8080; }} upstream uibackend {{ diff --git a/installation/kubernetes/helm/vmclarity/README.md b/installation/kubernetes/helm/vmclarity/README.md index 11700ebe6..4c53ab9ca 100644 --- a/installation/kubernetes/helm/vmclarity/README.md +++ b/installation/kubernetes/helm/vmclarity/README.md @@ -293,7 +293,7 @@ secrets. | trivyServer.serviceAccount.name | string | `""` | The name of the ServiceAccount to use. If not set and create is true, it will use the component's calculated name. 
| | ui.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | Force the child process to run as non-privileged | | ui.containerSecurityContext.capabilities.drop | list | `["ALL"]` | List of capabilities to be dropped | -| ui.containerSecurityContext.enabled | bool | `false` | Container security context enabled | +| ui.containerSecurityContext.enabled | bool | `true` | Container security context enabled | | ui.containerSecurityContext.privileged | bool | `false` | Whether the container should run in privileged mode | | ui.containerSecurityContext.readOnlyRootFilesystem | bool | `true` | Mounts the container file system as ReadOnly | | ui.containerSecurityContext.runAsGroup | int | `101` | Group ID which the containers should run as | @@ -304,7 +304,7 @@ secrets. | ui.image.registry | string | `"ghcr.io"` | UI image registry | | ui.image.repository | string | `"openclarity/vmclarity-ui"` | UI image repository | | ui.image.tag | string | `"latest"` | UI image tag | -| ui.podSecurityContext.enabled | bool | `false` | Pod security context enabled | +| ui.podSecurityContext.enabled | bool | `true` | Pod security context enabled | | ui.podSecurityContext.fsGroup | int | `101` | Pod security context fsGroup | | ui.replicas | int | `1` | Number of replicas for the UI service | | ui.resources.limits | object | `{}` | The resources limits for the UI containers | diff --git a/installation/kubernetes/helm/vmclarity/templates/gateway/configmap.yaml b/installation/kubernetes/helm/vmclarity/templates/gateway/configmap.yaml index 4ab894b3f..495123020 100644 --- a/installation/kubernetes/helm/vmclarity/templates/gateway/configmap.yaml +++ b/installation/kubernetes/helm/vmclarity/templates/gateway/configmap.yaml @@ -12,7 +12,7 @@ data: http { upstream ui { - server {{ printf "%s" (include "vmclarity.ui.name" .) }}:80; + server {{ printf "%s" (include "vmclarity.ui.name" .) }}:8080; } upstream uibackend { diff --git a/installation/kubernetes/helm/vmclarity/templates/ui/deployment.yaml b/installation/kubernetes/helm/vmclarity/templates/ui/deployment.yaml index 584469e0f..4f91c0a8b 100644 --- a/installation/kubernetes/helm/vmclarity/templates/ui/deployment.yaml +++ b/installation/kubernetes/helm/vmclarity/templates/ui/deployment.yaml @@ -38,9 +38,13 @@ spec: name: tmpfs-1 - mountPath: /var/cache/nginx name: tmpfs-2 + - mountPath: /tmp + name: tmpfs-3 volumes: - name: tmpfs-1 emptyDir: {} - name: tmpfs-2 emptyDir: {} + - name: tmpfs-3 + emptyDir: {} serviceAccountName: {{ include "vmclarity.ui.serviceAccountName" . 
}} diff --git a/installation/kubernetes/helm/vmclarity/values.yaml b/installation/kubernetes/helm/vmclarity/values.yaml index e5491f134..f69d6c4ce 100644 --- a/installation/kubernetes/helm/vmclarity/values.yaml +++ b/installation/kubernetes/helm/vmclarity/values.yaml @@ -259,13 +259,13 @@ ui: podSecurityContext: # -- Pod security context enabled - enabled: false + enabled: true # -- Pod security context fsGroup fsGroup: 101 containerSecurityContext: # -- Container security context enabled - enabled: false + enabled: true # -- User ID which the containers should run as runAsUser: 101 # -- Group ID which the containers should run as diff --git a/ui/Dockerfile b/ui/Dockerfile index 8ea8200ab..993a4070a 100644 --- a/ui/Dockerfile +++ b/ui/Dockerfile @@ -20,7 +20,7 @@ RUN --mount=type=cache,target=/src/ui/node_modules \ RUN --mount=type=cache,target=/src/ui/node_modules \ npm run build --prefix /src/ui -FROM nginx:1.27.0@sha256:6af79ae5de407283dcea8b00d5c37ace95441fd58a8b1d2aa1ed93f5511bb18c +FROM nginxinc/nginx-unprivileged:1.27.0 COPY --from=site-build ["/src/ui/build", "/usr/share/nginx/html"] diff --git a/ui/nginx.conf b/ui/nginx.conf index 8237a908e..4d5bc6108 100644 --- a/ui/nginx.conf +++ b/ui/nginx.conf @@ -1,6 +1,6 @@ server { - listen 80; - listen [::]:80; + listen 8080; + listen [::]:8080; server_name localhost; #access_log /var/log/nginx/host.access.log main;
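Verification note: a minimal local smoke test of the rebased UI image, assuming the chart's default image reference (ghcr.io/openclarity/vmclarity-ui:latest) and that the built static site serves standalone; the container name below is illustrative only.

    # Run the UI image detached; the nginx-unprivileged base listens on 8080 instead of 80.
    docker run --rm -d --name vmclarity-ui-test -p 8080:8080 ghcr.io/openclarity/vmclarity-ui:latest
    # The container should report the non-root nginx user (UID 101), matching runAsUser/runAsGroup in values.yaml.
    docker exec vmclarity-ui-test id -u
    # The UI should answer on the new unprivileged port.
    curl -sI http://localhost:8080/ | head -n 1
    docker rm -f vmclarity-ui-test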