diff --git a/docusaurus.config.js b/docusaurus.config.js
index 82621782e..ff352c519 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -30,7 +30,7 @@ module.exports = {
   ],
   i18n: {
     defaultLocale: "en",
-    locales: ["en", "zh", "kr", "ja"],
+    locales: ["en", "zh", "kr", "ja", "pt-BR"],
     localeConfigs: {
       en: {
         label: "English",
@@ -43,6 +43,9 @@ module.exports = {
       },
       ja: {
         label: "日本語"
+      },
+      "pt-BR": {
+        label: "Português - Brasil"
       }
     },
   },
@@ -65,7 +68,7 @@ module.exports = {
         srcDark: 'img/k3s-logo-dark.svg',
       },
       items: [
-        {
+        {
           type: 'search',
           position: 'right',
         },
@@ -90,7 +93,7 @@ module.exports = {
       style: 'dark',
       links: [],
       copyright: `Copyright © ${new Date().getFullYear()} K3s Project Authors. All rights reserved.
The Linux Foundation has registered trademarks - and uses trademarks. For a list of trademarks of The Linux Foundation, + and uses trademarks. For a list of trademarks of The Linux Foundation, please see our Trademark Usage page.`, }, }, @@ -130,5 +133,5 @@ module.exports = { ], }, ], - ], + ], }; diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current.json b/i18n/pt-BR/docusaurus-plugin-content-docs/current.json new file mode 100644 index 000000000..0a6e5beb4 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current.json @@ -0,0 +1,38 @@ +{ + "version.label": { + "message": "Próxima", + "description": "O campo para a versão atual" + }, + "sidebar.mySidebar.category.Installation": { + "message": "Instalação", + "description": "O campo para a categoria Instalação na barra lateral mySidebar" + }, + "sidebar.mySidebar.category.Cluster Datastore": { + "message": "Datastore do Cluster", + "description": "O campo para a categoria Datastore do Cluster na barra lateral mySidebar" + }, + "sidebar.mySidebar.category.Upgrades": { + "message": "Atualizações", + "description": "O campo para a categoria Atualizações na barra lateral mySidebar" + }, + "sidebar.mySidebar.category.Security": { + "message": "Segurança", + "description": "O campo para a categoria Segurança na barra lateral mySidebar" + }, + "sidebar.mySidebar.category.CLI Tools": { + "message": "Ferramentas CLI", + "description": "O campo para a categoria Ferramentas CLI na barra lateral mySidebar" + }, + "sidebar.mySidebar.category.Networking": { + "message": "Rede", + "description": "O campo para a categoria Rede na barra lateral mySidebar" + }, + "sidebar.mySidebar.category.Reference": { + "message": "Referência", + "description": "O campo para a categoria Referência na barra lateral mySidebar" + }, + "sidebar.mySidebar.category.Release Notes": { + "message": "Notas de Lançamento", + "description": "O campo para a categoria Notas de Lançamento na barra lateral mySidebar" + } +} diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/advanced.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/advanced.md new file mode 100644 index 000000000..275140af3 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/advanced.md @@ -0,0 +1,468 @@ +--- +title: "Opções Avançadas / Configuração" +--- + +Esta seção contém informações avançadas descrevendo as diferentes maneiras de executar e gerenciar o K3s, bem como as etapas necessárias para preparar o sistema operacional host para uso do K3s. + +## Gestão de Certificados + +### Certificados de Autoridade Certificadora + +O K3s gera Certificados de Autoridade de Certificação (CA) autoassinados durante a inicialização do primeiro nó do servidor. Esses certificados de CA são válidos por 10 anos e não são renovados automaticamente. + +Para obter informações sobre o uso de certificados de CA personalizados ou a renovação de certificados de CA autoassinados, consulte a [documentação do comando `k3s certificate rotate-ca`](./cli/certificate.md#certificate-authority-ca-certificates). + +### Certificados de cliente e servidor + +Os certificados de cliente e servidor do K3s são válidos por 365 dias a partir da data de emissão. Quaisquer certificados que estejam expirados, ou dentro de 90 dias de expiração, são renovados automaticamente toda vez que o K3s é iniciado. + +Para obter informações sobre a rotação manual de certificados de cliente e servidor, consulte a [documentação do comando `k3s certificate rotate`](./cli/certificate.md#client-and-server-certificates). 
+ +## Gerenciamento de Tokens + +Por padrão, o K3s usa um único token estático para servidores e agentes. Este token não pode ser alterado depois que o cluster foi criado. +É possível habilitar um segundo token estático que só pode ser usado para unir agentes ou para criar tokens de união temporários no estilo `kubeadm` que expiram automaticamente. +Para obter mais informações, consulte a [documentação do comando `k3s token`](./cli/token.md). + +## Configurando um proxy HTTP + +Se você estiver executando o K3s em um ambiente que só tem conectividade externa por meio de um proxy HTTP, você pode configurar suas configurações de proxy no serviço systemd do K3s. Essas configurações de proxy serão então usadas no K3s e passadas para o containerd e o kubelet incorporados. + +O script de instalação do K3s pegará automaticamente as variáveis ​​`HTTP_PROXY`, `HTTPS_PROXY` e `NO_PROXY`, bem como as variáveis ​​`CONTAINERD_HTTP_PROXY`, `CONTAINERD_HTTPS_PROXY` e `CONTAINERD_NO_PROXY` do shell atual, se estiverem presentes, e as gravará no arquivo de ambiente do seu serviço systemd, geralmente: + +- `/etc/systemd/system/k3s.service.env` +- `/etc/systemd/system/k3s-agent.service.env` + +Claro, você também pode configurar o proxy editando esses arquivos. + +O K3s adicionará automaticamente os intervalos de IP do Pod e do Serviço interno do cluster e o domínio DNS do cluster à lista de entradas `NO_PROXY`. Você deve garantir que os intervalos de endereços IP usados ​​pelos próprios nós do Kubernetes (ou seja, os IPs públicos e privados dos nós) estejam incluídos na lista `NO_PROXY` ou que os nós possam ser acessados ​​por meio do proxy. + +``` +HTTP_PROXY=http://your-proxy.example.com:8888 +HTTPS_PROXY=http://your-proxy.example.com:8888 +NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 +``` + +Se você quiser configurar as configurações de proxy para o containerd sem afetar o K3s e o Kubelet, você pode prefixar as variáveis ​​com `CONTAINERD_`: + +``` +CONTAINERD_HTTP_PROXY=http://your-proxy.example.com:8888 +CONTAINERD_HTTPS_PROXY=http://your-proxy.example.com:8888 +CONTAINERD_NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 +``` + +## Usando o Docker como o tempo de execução do contêiner + +O K3s inclui e tem como padrão [containerd](https://containerd.io/), um tempo de execução de contêiner padrão do setor. +A partir do Kubernetes 1.24, o Kubelet não inclui mais dockershim, o componente que permite que o kubelet se comunique com o dockerd. +O K3s 1.24 e superior incluem [cri-dockerd](https://github.com/Mirantis/cri-dockerd), que permite atualização contínua de versões anteriores do K3s enquanto continua a usar o tempo de execução do contêiner Docker. + +Para usar o Docker em vez do containerd: + +1. Instale o Docker no nó K3s. Um dos [scripts de instalação do Docker](https://github.com/rancher/install-docker) do Rancher pode ser usado para instalar o Docker: + + ```bash + curl https://releases.rancher.com/install-docker/20.10.sh | sh + ``` + +2. Instale o K3s usando a opção `--docker`: + + ```bash + curl -sfL https://get.k3s.io | sh -s - --docker + ``` + +3. 
Confirme se o cluster está disponível: + + ```bash + $ sudo k3s kubectl get pods --all-namespaces + NAMESPACE NAME READY STATUS RESTARTS AGE + kube-system local-path-provisioner-6d59f47c7-lncxn 1/1 Running 0 51s + kube-system metrics-server-7566d596c8-9tnck 1/1 Running 0 51s + kube-system helm-install-traefik-mbkn9 0/1 Completed 1 51s + kube-system coredns-8655855d6-rtbnb 1/1 Running 0 51s + kube-system svclb-traefik-jbmvl 2/2 Running 0 43s + kube-system traefik-758cd5fc85-2wz97 1/1 Running 0 43s + ``` + +4. Confirme se os contêineres do Docker estão em execução: + + ```bash + $ sudo docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 3e4d34729602 897ce3c5fc8f "entry" About a minute ago Up About a minute k8s_lb-port-443_svclb-traefik-jbmvl_kube-system_d46f10c6-073f-4c7e-8d7a-8e7ac18f9cb0_0 + bffdc9d7a65f rancher/klipper-lb "entry" About a minute ago Up About a minute k8s_lb-port-80_svclb-traefik-jbmvl_kube-system_d46f10c6-073f-4c7e-8d7a-8e7ac18f9cb0_0 + 436b85c5e38d rancher/library-traefik "/traefik --configfi…" About a minute ago Up About a minute k8s_traefik_traefik-758cd5fc85-2wz97_kube-system_07abe831-ffd6-4206-bfa1-7c9ca4fb39e7_0 + de8fded06188 rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_svclb-traefik-jbmvl_kube-system_d46f10c6-073f-4c7e-8d7a-8e7ac18f9cb0_0 + 7c6a30aeeb2f rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_traefik-758cd5fc85-2wz97_kube-system_07abe831-ffd6-4206-bfa1-7c9ca4fb39e7_0 + ae6c58cab4a7 9d12f9848b99 "local-path-provisio…" About a minute ago Up About a minute k8s_local-path-provisioner_local-path-provisioner-6d59f47c7-lncxn_kube-system_2dbd22bf-6ad9-4bea-a73d-620c90a6c1c1_0 + be1450e1a11e 9dd718864ce6 "/metrics-server" About a minute ago Up About a minute k8s_metrics-server_metrics-server-7566d596c8-9tnck_kube-system_031e74b5-e9ef-47ef-a88d-fbf3f726cbc6_0 + 4454d14e4d3f c4d3d16fe508 "/coredns -conf /etc…" About a minute ago Up About a minute k8s_coredns_coredns-8655855d6-rtbnb_kube-system_d05725df-4fb1-410a-8e82-2b1c8278a6a1_0 + c3675b87f96c rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_coredns-8655855d6-rtbnb_kube-system_d05725df-4fb1-410a-8e82-2b1c8278a6a1_0 + 4b1fddbe6ca6 rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_local-path-provisioner-6d59f47c7-lncxn_kube-system_2dbd22bf-6ad9-4bea-a73d-620c90a6c1c1_0 + 64d3517d4a95 rancher/pause:3.1 "/pause" + ``` + +## Usando etcdctl + +etcdctl fornece uma CLI para interagir com servidores etcd. O K3s não empacota etcdctl. + +Se você quiser usar o etcdctl para interagir com o etcd incorporado do K3s, instale o etcdctl usando a [documentação oficial](https://etcd.io/docs/latest/install/). + +```bash +ETCD_VERSION="v3.5.5" +ETCD_URL="https://github.com/etcd-io/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz" +curl -sL ${ETCD_URL} | sudo tar -zxv --strip-components=1 -C /usr/local/bin +``` + +Você pode então usar o etcdctl configurando-o para usar os certificados e chaves gerenciados pelo K3s para autenticação: + +```bash +sudo etcdctl version \ + --cacert=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt \ + --cert=/var/lib/rancher/k3s/server/tls/etcd/client.crt \ + --key=/var/lib/rancher/k3s/server/tls/etcd/client.key +``` + +## Configurando o containerd + +O K3s gerará config.toml para o containerd em `/var/lib/rancher/k3s/agent/etc/containerd/config.toml`. 
+ +Para personalização avançada deste arquivo, você pode criar outro arquivo chamado `config.toml.tmpl` no mesmo diretório, e ele será usado em seu lugar. + +O `config.toml.tmpl` será tratado como um arquivo de modelo Go, e a estrutura `config.Node` está sendo passada para o modelo. Veja [esta pasta](https://github.com/k3s-io/k3s/blob/master/pkg/agent/templates) para exemplos de Linux e Windows sobre como usar a estrutura para personalizar o arquivo de configuração. +A estrutura golang config.Node é definida [aqui](https://github.com/k3s-io/k3s/blob/master/pkg/daemons/config/types.go#L37) + +### Modelo base + +:::info Nota de Versão +Disponível a partir das versões de setembro de 2023: v1.24.17+k3s1, v1.25.13+k3s1, v1.26.8+k3s1, v1.27.5+k3s1, v1.28.1+k3s1 +::: + +Você pode estender o modelo base do K3s em vez de copiar e colar o modelo de estoque completo do código-fonte do K3s. Isso é útil se você precisa construir sobre a configuração existente e adicionar algumas linhas extras no final. + +```toml +#/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl + +{{ template "base" . }} + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes."custom"] + runtime_type = "io.containerd.runc.v2" +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes."custom".options] + BinaryName = "/usr/bin/custom-container-runtime" + +``` +## Suporte alternativo de tempo de execução de contêiner + +O K3s detectará automaticamente runtimes de contêiner alternativos se eles estiverem presentes quando o K3s iniciar. Os runtimes de contêiner suportados são: +``` +crun, lunatic, nvidia, nvidia-cdi, nvidia-experimental, slight, spin, wasmedge, wasmer, wasmtime, wws +``` + +As GPUs NVIDIA exigem a instalação do NVIDIA Container Runtime para agendar e executar cargas de trabalho aceleradas em Pods. Para usar GPUs NVIDIA com K3s, execute as seguintes etapas: + +1. Instale o repositório de pacotes nvidia-container no nó seguindo as instruções em: + https://nvidia.github.io/libnvidia-container/ +2. Instale os pacotes de tempo de execução do contêiner nvidia. Por exemplo: + `apt install -y nvidia-container-runtime cuda-drivers-fabricmanager-515 nvidia-headless-515-server` +3. [Instale o K3s](./installation), ou reinicie-o se já estiver instalado. +4. Confirme se o tempo de execução do contêiner nvidia foi encontrado pelo k3s: + `grep nvidia /var/lib/rancher/k3s/agent/etc/containerd/config.toml` + +Se essas etapas forem seguidas corretamente, o K3s adicionará automaticamente os tempos de execução da NVIDIA à configuração do containerd, dependendo de quais executáveis ​​de tempo de execução forem encontrados. + +:::info Nota de Versão +O sinalizador `--default-runtime` e os recursos RuntimeClass integrados estão disponíveis a partir das versões de dezembro de 2023: v1.29.0+k3s1, v1.28.5+k3s1, v1.27.9+k3s1, v1.26.12+k3s1 +Antes dessas versões, você deve implantar seus próprios recursos RuntimeClass para quaisquer tempos de execução que deseja referenciar nas especificações do Pod. +::: + +O K3s inclui definições de Kubernetes RuntimeClass para todos os runtimes alternativos suportados. Você pode selecionar um deles para substituir `runc` como o runtime padrão em um nó definindo o valor `--default-runtime` por meio do k3s CLI ou do arquivo de configuração. 
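+
+Por exemplo, um esboço mínimo de `/etc/rancher/k3s/config.yaml` definindo o runtime padrão (os valores são ilustrativos; assume-se que o runtime `nvidia` já foi detectado pelo K3s na inicialização):
+
+```yaml
+# /etc/rancher/k3s/config.yaml
+# Equivalente a iniciar o K3s com --default-runtime=nvidia
+default-runtime: nvidia
+```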
+
+Se você não alterou o tempo de execução padrão nos nós da GPU, deverá solicitar explicitamente o tempo de execução da NVIDIA definindo `runtimeClassName: nvidia` na especificação do Pod:
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nbody-gpu-benchmark
+  namespace: default
+spec:
+  restartPolicy: OnFailure
+  runtimeClassName: nvidia
+  containers:
+  - name: cuda-container
+    image: nvcr.io/nvidia/k8s/cuda-sample:nbody
+    args: ["nbody", "-gpu", "-benchmark"]
+    resources:
+      limits:
+        nvidia.com/gpu: 1
+    env:
+    - name: NVIDIA_VISIBLE_DEVICES
+      value: all
+    - name: NVIDIA_DRIVER_CAPABILITIES
+      value: all
+```
+
+Observe que o NVIDIA Container Runtime também é frequentemente usado com o [NVIDIA Device Plugin](https://github.com/NVIDIA/k8s-device-plugin/), com modificações para garantir que as especificações do pod incluam `runtimeClassName: nvidia`, conforme mencionado acima.
+
+## Executando Servidores sem Agentes (Experimental)
+> **Aviso:** Este recurso é experimental.
+
+Quando iniciados com o sinalizador `--disable-agent`, os servidores não executam o kubelet, o tempo de execução do contêiner nem o CNI. Eles não registram um recurso Node no cluster e não aparecerão na saída de `kubectl get nodes`.
+Como não hospedam um kubelet, eles não podem executar pods nem ser gerenciados por operadores que dependem da enumeração dos nós do cluster, incluindo o controlador etcd incorporado e o controlador de atualização do sistema.
+
+Executar servidores sem agentes pode ser vantajoso se você quiser ocultar os nós do seu plano de controle da descoberta por agentes e cargas de trabalho, ao custo de maior sobrecarga administrativa causada pela falta de suporte dos operadores de cluster.
+
+Por padrão, o apiserver em servidores sem agente não poderá fazer conexões de saída para webhooks de admissão ou apiservices agregados em execução no cluster. Para remediar isso, defina o sinalizador de servidor `--egress-selector-mode` como `pod` ou `cluster`. Se você estiver alterando esse sinalizador em um cluster existente, precisará reiniciar todos os nós do cluster para que a opção entre em vigor.
+
+## Executando Servidores Rootless (Experimental)
+> **Aviso:** Este recurso é experimental.
+
+O modo Rootless permite executar servidores K3s como um usuário sem privilégios, a fim de proteger o root real do host contra possíveis ataques de escape de contêiner.
+
+Acesse https://rootlesscontaine.rs/ para saber mais sobre o Rootless Kubernetes.
+
+### Problemas conhecidos com o modo Rootless
+
+* **Portas**
+
+  Ao executar em modo rootless, um novo namespace de rede é criado. Isso significa que a instância do K3s é executada com a rede bastante separada do host.
+  A única maneira de acessar, a partir do host, os serviços executados no K3s é configurar encaminhamentos de porta para o namespace de rede do K3s.
+  O K3s rootless inclui um controlador que vinculará automaticamente ao host a porta 6443 e as portas de serviço abaixo de 1024, com um deslocamento de 10000.
+
+  Por exemplo, um Service na porta 80 se tornará 10080 no host, mas um na porta 8080 permanecerá 8080, sem deslocamento. Atualmente, apenas Services do tipo LoadBalancer são vinculados automaticamente.
+
+* **Cgroups**
+
+  Cgroup v1 e o modo híbrido v1/v2 não são suportados; somente cgroup v2 puro é suportado. Se o K3s falhar ao iniciar devido a cgroups ausentes ao executar em modo rootless, é provável que seu nó esteja no modo híbrido, e os cgroups "ausentes" ainda estejam vinculados a um controlador v1.
+
+* **Cluster multi-nó/multi-processo**
+
+  Clusters rootless de vários nós, ou vários processos k3s rootless no mesmo nó, não são suportados atualmente. Veja [#6488](https://github.com/k3s-io/k3s/issues/6488#issuecomment-1314998091) para mais detalhes.
+
+### Iniciando Servidores Rootless
+* Habilite a delegação do cgroup v2, veja https://rootlesscontaine.rs/getting-started/common/cgroup2/ .
+  Esta etapa é necessária; o kubelet rootless falhará ao iniciar sem a delegação apropriada dos cgroups.
+
+* Baixe `k3s-rootless.service` de [`https://github.com/k3s-io/k3s/blob/<version>/k3s-rootless.service`](https://github.com/k3s-io/k3s/blob/master/k3s-rootless.service).
+  Certifique-se de usar a mesma versão de `k3s-rootless.service` e `k3s`.
+
+* Instale `k3s-rootless.service` em `~/.config/systemd/user/k3s-rootless.service`.
+  A instalação deste arquivo como um serviço de todo o sistema (`/etc/systemd/...`) não é suportada.
+  Dependendo do caminho do binário `k3s`, pode ser necessário modificar a linha `ExecStart=/usr/local/bin/k3s ...` do arquivo.
+
+* Execute `systemctl --user daemon-reload`
+
+* Execute `systemctl --user enable --now k3s-rootless`
+
+* Execute `KUBECONFIG=~/.kube/k3s.yaml kubectl get pods -A` e certifique-se de que os pods estejam em execução.
+
+> **Observação:** não tente executar `k3s server --rootless` em um terminal, pois as sessões de terminal não permitem a delegação do cgroup v2.
+> Se você realmente precisar testá-lo em um terminal, use `systemd-run --user -p Delegate=yes --tty k3s server --rootless` para envolvê-lo em um escopo systemd.
+
+### Configurações Avançadas Rootless
+
+O K3s rootless usa [rootlesskit](https://github.com/rootless-containers/rootlesskit) e [slirp4netns](https://github.com/rootless-containers/slirp4netns) para a comunicação entre os namespaces de rede do host e do usuário.
+Algumas das configurações usadas pelo rootlesskit e pelo slirp4netns podem ser definidas por variáveis de ambiente. A melhor maneira de defini-las é adicioná-las ao campo `Environment` da unidade systemd k3s-rootless.
+
+| Variável | Valor Padrão | Descrição |
+| -------- | ------------ | --------- |
+| `K3S_ROOTLESS_MTU` | 1500 | Define a MTU das interfaces virtuais do slirp4netns. |
+| `K3S_ROOTLESS_CIDR` | 10.41.0.0/16 | Define o CIDR usado pelas interfaces virtuais do slirp4netns. |
+| `K3S_ROOTLESS_ENABLE_IPV6` | autodetectado | Habilita o suporte IPv6 do slirp4netns. Se não for especificado, será habilitado automaticamente se o K3s estiver configurado para operação dual-stack. |
+| `K3S_ROOTLESS_PORT_DRIVER` | builtin | Seleciona o driver de porta rootless; `builtin` ou `slirp4netns`. Builtin é mais rápido, mas mascara o endereço de origem original dos pacotes de entrada. |
+| `K3S_ROOTLESS_DISABLE_HOST_LOOPBACK` | true | Controla se o acesso ao endereço de loopback do host via interface de gateway está habilitado ou não. É recomendado não alterar isso, por razões de segurança. |
+
+### Solução de Problemas com o Modo Rootless
+
+* Execute `systemctl --user status k3s-rootless` para verificar o status do daemon
+* Execute `journalctl --user -f -u k3s-rootless` para ver o log do daemon
+* Veja também https://rootlesscontaine.rs/
+
+## Node Labels e Taints
+
+Os agentes K3s podem ser configurados com as opções `--node-label` e `--node-taint`, que adicionam uma label e um taint ao kubelet.
+As duas opções apenas adicionam labels e/ou taints [no momento do registro](./cli/agent.md#node-labels-and-taints-for-agents), então só podem ser definidas quando o nó é adicionado ao cluster pela primeira vez.
+
+Todas as versões atuais do Kubernetes restringem os nós de se registrarem com a maioria das labels com os prefixos `kubernetes.io` e `k8s.io`, incluindo especificamente a label `kubernetes.io/role`. Se você tentar iniciar um nó com uma label não permitida, o K3s falhará ao iniciar. Conforme declarado pelos autores do Kubernetes:
+
+> Os nós não têm permissão para afirmar suas próprias labels de função. As funções de nó são normalmente usadas para identificar tipos de nós privilegiados ou de plano de controle, e permitir que os nós se rotulem nesse pool permite que um nó comprometido atraia trivialmente cargas de trabalho (como daemonsets do plano de controle) que conferem acesso a credenciais de privilégio mais alto.
+
+Consulte [SIG-Auth KEP 279](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/279-limit-node-access/README.md) para obter mais informações.
+
+Se você quiser alterar as labels e taints dos nós após o registro do nó, ou adicionar labels reservadas, você deve usar `kubectl`. Consulte a documentação oficial do Kubernetes para obter detalhes sobre como adicionar [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) e [labels aos nós](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node).
+
+## Iniciando o Serviço com o Script de Instalação
+
+O script de instalação detectará automaticamente se o seu sistema operacional está usando systemd ou openrc e habilitará e iniciará o serviço como parte do processo de instalação.
+* Ao executar com openrc, os logs serão criados em `/var/log/k3s.log`.
+* Ao executar com systemd, os logs serão criados em `/var/log/syslog` e podem ser visualizados usando `journalctl -u k3s` (ou `journalctl -u k3s-agent` em agentes).
+
+Um exemplo de como desabilitar o início automático e a habilitação do serviço com o script de instalação:
+
+```bash
+curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_ENABLE=true sh -
+```
+
+## Executando K3s no Docker
+
+Existem várias maneiras de executar o K3s no Docker:
+
+[k3d](https://github.com/k3d-io/k3d) é um utilitário projetado para executar facilmente clusters K3s de vários nós no Docker.
+
+O k3d torna muito fácil criar clusters k3s de um ou vários nós no docker, por exemplo, para desenvolvimento local no Kubernetes.
+
+Consulte a documentação de [Instalação](https://k3d.io/#installation) para obter mais informações sobre como instalar e usar o k3d.
+
+Para usar o Docker, as imagens `rancher/k3s` também estão disponíveis para executar o servidor e o agente K3s.
+Usando o comando `docker run`:
+
+```bash
+sudo docker run \
+  --privileged \
+  --name k3s-server-1 \
+  --hostname k3s-server-1 \
+  -p 6443:6443 \
+  -d rancher/k3s:v1.24.10-k3s1 \
+  server
+```
+:::note
+Você deve especificar uma versão válida do K3s como a tag; a tag `latest` não é mantida.
+As imagens do Docker não permitem o caractere `+` em tags; use `-` na tag em vez disso.
+::: + +Quando o K3s estiver instalado e funcionando, você pode copiar o kubeconfig do administrador do contêiner do Docker para uso: +```bash +sudo docker cp k3s-server-1:/etc/rancher/k3s/k3s.yaml ~/.kube/config +``` + + + + +## SELinux Support + +:::info Nota de Versão + +Disponível a partir da versão 1.19.4+k3s1 + +::: + +Se você estiver instalando o K3s em um sistema onde o SELinux está habilitado por padrão (como o CentOS), você deve garantir que as políticas adequadas do SELinux tenham sido instaladas. + + + + +O [script de instalação](./installation/configuration.md#configuration-with-install-script) instalará automaticamente o SELinux RPM do repositório Rancher RPM se estiver em um sistema compatível, caso não esteja executando uma instalação air-gapped. A instalação automática pode ser ignorada definindo `INSTALL_K3S_SKIP_SELINUX_RPM=true`. + + + + + +As políticas necessárias podem ser instaladas com os seguintes comandos: +```bash +yum install -y container-selinux selinux-policy-base +yum install -y https://rpm.rancher.io/k3s/latest/common/centos/7/noarch/k3s-selinux-1.4-1.el7.noarch.rpm +``` + +Para forçar o script de instalação a registrar um aviso em vez de falhar, você pode definir a seguinte variável de ambiente: `INSTALL_K3S_SELINUX_WARN=true`. + + + +### Habilitando SELinux Enforcement + +Para aproveitar o SELinux, especifique o sinalizador `--selinux` ao iniciar servidores e agentes K3s. + +Esta opção também pode ser especificada no [arquivo de configuração](./installation/configuration.md#configuration-file) do K3s. + +``` +selinux: true +``` + +Não há suporte para usar um `--data-dir` personalizado no SELinux. Para personalizá-lo, você provavelmente precisará escrever sua própria política personalizada. Para orientação, você pode consultar o repositório [containers/container-selinux](https://github.com/containers/container-selinux), que contém os arquivos de política do SELinux para Container Runtimes, e o repositório [k3s-io/k3s-selinux](https://github.com/k3s-io/k3s-selinux), que contém a política do SELinux para o K3s. + +## Habilitando o Lazy Pulling do eStargz (Experimental) + +### O que é Lazy Pulling e eStargz? + +Puxar imagens é conhecido como uma das etapas que consomem mais tempo no ciclo de vida do contêiner. +De acordo com [Harter, et al.](https://www.usenix.org/conference/fast16/technical-sessions/presentation/harter), + +> a extração de pacotes é responsável por 76% do tempo de inicialização do contêiner, mas apenas 6,4% desses dados são lidos + +Para resolver esse problema, o k3s suporta experimentalmente *lazy pulling* de conteúdos de imagem. +Isso permite que o k3s inicie um contêiner antes que a imagem inteira tenha sido puxada. +Em vez disso, os pedaços necessários de conteúdo (por exemplo, arquivos individuais) são buscados sob demanda. +Especialmente para imagens grandes, essa técnica pode encurtar a latência de inicialização do contêiner. + +Para habilitar o lazy pulling, a imagem de destino precisa ser formatada como [*eStargz*](https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md). +Este é um formato de imagem alternativo ao OCI, mas 100% compatível com OCI para lazy pulling. +Devido à compatibilidade, o eStargz pode ser enviado para registros de contêineres padrão (por exemplo, ghcr.io), assim como isso é *ainda executável* mesmo em tempos de execução agnósticos do eStargz. 
+O eStargz é desenvolvido com base no [formato stargz proposto pelo projeto Google CRFS](https://github.com/google/crfs), mas vem com recursos práticos, incluindo verificação de conteúdo e otimização de desempenho.
+Para mais detalhes sobre lazy pulling e eStargz, consulte o [repositório do projeto Stargz Snapshotter](https://github.com/containerd/stargz-snapshotter).
+
+### Configurar o k3s para lazy pulling de eStargz
+
+Conforme mostrado a seguir, a opção `--snapshotter=stargz` é necessária para o servidor e o agente k3s.
+
+```bash
+k3s server --snapshotter=stargz
+```
+
+Com essa configuração, você pode executar lazy pulling para imagens formatadas em eStargz.
+O manifesto de Pod de exemplo a seguir usa a imagem `node:13.13.0` formatada em eStargz (`ghcr.io/stargz-containers/node:13.13.0-esgz`).
+Quando o snapshotter stargz está habilitado, o K3s executa lazy pulling para essa imagem.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nodejs
+spec:
+  containers:
+  - name: nodejs-estargz
+    image: ghcr.io/stargz-containers/node:13.13.0-esgz
+    command: ["node"]
+    args:
+    - -e
+    - var http = require('http');
+      http.createServer(function(req, res) {
+        res.writeHead(200);
+        res.end('Hello World!\n');
+      }).listen(80);
+    ports:
+    - containerPort: 80
+```
+
+## Fontes Adicionais de Logs
+
+O [Rancher logging](https://rancher.com/docs/rancher/v2.6/en/logging/helm-chart-options/) para K3s pode ser instalado sem usar o Rancher. Para isso, execute as seguintes instruções:
+
+```bash
+helm repo add rancher-charts https://charts.rancher.io
+helm repo update
+helm install --create-namespace -n cattle-logging-system rancher-logging-crd rancher-charts/rancher-logging-crd
+helm install --create-namespace -n cattle-logging-system rancher-logging --set additionalLoggingSources.k3s.enabled=true rancher-charts/rancher-logging
+```
+
+## Logs Adicionais de Política de Rede
+
+Pacotes descartados por políticas de rede podem ser registrados em log. O pacote é enviado para a ação NFLOG do iptables, que mostra os detalhes do pacote, incluindo a política de rede que o bloqueou.
+
+Se houver muito tráfego, o número de mensagens de log pode ser muito alto. Para controlar a taxa de log por política, defina os parâmetros `limit` e `limit-burst` do iptables adicionando as seguintes anotações à política de rede em questão:
+* `kube-router.io/netpol-nflog-limit=<valor>`
+* `kube-router.io/netpol-nflog-limit-burst=<valor>`
+
+Os valores padrão são `limit=10/minute` e `limit-burst=10`. Verifique o [manual do iptables](https://www.netfilter.org/documentation/HOWTO/packet-filtering-HOWTO-7.html#:~:text=restrict%20the%20rate%20of%20matches) para obter mais informações sobre o formato e os valores possíveis desses campos.
+
+Para converter os pacotes NFLOG em entradas de log, instale o ulogd2 e configure `[log1]` para ler em `group=100`. Em seguida, reinicie o serviço ulogd2 para que a nova configuração seja aplicada.
+Quando um pacote é bloqueado por regras de política de rede, uma mensagem de log aparecerá em `/var/log/ulog/syslogemu.log`.
+
+Os pacotes enviados para o soquete netlink NFLOG também podem ser lidos usando ferramentas de linha de comando como tcpdump ou tshark:
+```bash
+tcpdump -ni nflog:100
+```
+Embora mais prontamente disponível, o tcpdump não mostrará o nome da política de rede que bloqueou o pacote. Use, em vez disso, o comando tshark do Wireshark para exibir o cabeçalho completo do pacote NFLOG, incluindo o campo `nflog.prefix`, que contém o nome da política.
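+
+Por exemplo, um esboço ilustrativo (assumindo o grupo NFLOG 100 configurado acima; os campos exibidos são apenas uma sugestão):
+
+```bash
+# Mostra o prefixo NFLOG (o nome da política) e os IPs de origem/destino dos pacotes bloqueados
+tshark -ni nflog:100 -T fields -e nflog.prefix -e ip.src -e ip.dst
+```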
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/architecture.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/architecture.md new file mode 100644 index 000000000..b6f0e2a34 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/architecture.md @@ -0,0 +1,86 @@ +--- +title: Arquitetura +--- + +import ThemedImage from '@theme/ThemedImage'; +import useBaseUrl from '@docusaurus/useBaseUrl'; + +### Servers e Agents + +* Um nó de servidor é definido como um host que executa o comando `k3s server`, com componentes de plano de controle e armazenamento de dados gerenciados pelo K3s. +* Um nó de agente é definido como um host executando o comando `k3s agent`, sem nenhum componente de armazenamento de dados ou plano de controle. +* Tanto os servidores quanto os agentes executam o kubelet, o tempo de execução do contêiner e o CNI. Consulte a documentação [Advanced Options](./advanced.md#running-agentless-servers-experimental) para obter mais informações sobre a execução de servidores sem agentes. + +![](/img/how-it-works-k3s-revised.svg) + +### Configuração de Servidor Único com um Banco de Dados Incorporado + +O diagrama a seguir mostra um exemplo de um cluster que tem um servidor K3s de nó único com um banco de dados SQLite incorporado. + +Nessa configuração, cada nó de agente é registrado no mesmo nó de servidor. Um usuário do K3s pode manipular recursos do Kubernetes chamando a API do K3s no nó de servidor. + + + +### K3s de Alta Disponibilidade + +Clusters de servidor único podem atender a uma variedade de casos de uso, mas para ambientes onde o tempo de atividade do plano de controle do Kubernetes é crítico, você pode executar o K3s em uma configuração de HA. Um cluster de HA K3s compreende: + + + + +* Três ou mais **nós de servidor** que servirão a API do Kubernetes e executarão outros serviços de plano de controle +* Um **armazenamento de dados etcd incorporado** (em oposição ao armazenamento de dados SQLite incorporado usado em configurações de servidor único) + + + + + + + +* Dois ou mais **nós de servidor** que servirão a API do Kubernetes e executarão outros serviços de plano de controle +* Um **armazenamento de dados externo** (como MySQL, PostgreSQL ou etcd) + + + + + + +### Endereço de Registro Fixo para Nós de Agente + +Na configuração do servidor de alta disponibilidade, cada nó também pode se registrar na API do Kubernetes usando um endereço de registro fixo, conforme mostrado no diagrama abaixo. + +Após o registro, os nós do agente estabelecem uma conexão diretamente com um dos nós do servidor. + + + +### Como Funciona o Registro do Nó do Agente + +Os nós do agente são registrados com uma conexão websocket iniciada pelo processo `k3s agent`, e a conexão é mantida por um balanceador de carga do lado do cliente em execução como parte do processo do agente. Inicialmente, o agente se conecta ao supervisor (e ao kube-apiserver) por meio do balanceador de carga local na porta 6443. O balanceador de carga mantém uma lista de endpoints disponíveis para conexão. O endpoint padrão (e inicialmente único) é semeado pelo nome do host do endereço `--server`. Depois de se conectar ao cluster, o agente recupera uma lista de endereços kube-apiserver da lista de endpoints de serviço do Kubernetes no namespace padrão. Esses endpoints são adicionados ao balanceador de carga, que então mantém conexões estáveis ​​com todos os servidores no cluster, fornecendo uma conexão com o kube-apiserver que tolera interrupções de servidores individuais. 
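+
+Por exemplo, a lista de endpoints que os agentes descobrem pode ser inspecionada diretamente (esboço ilustrativo):
+
+```bash
+# Lista os endereços de kube-apiserver publicados no serviço `kubernetes` do namespace default
+kubectl get endpoints kubernetes -n default -o wide
+```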
+ +Os agentes se registrarão no servidor usando o segredo do cluster de nós junto com uma senha gerada aleatoriamente para o nó, armazenada em `/etc/rancher/node/password`. O servidor armazenará as senhas para nós individuais como segredos do Kubernetes, e quaisquer tentativas subsequentes devem usar a mesma senha. Os segredos de senha do nó são armazenados no namespace `kube-system` com nomes usando o modelo `.node-password.k3s`. Isso é feito para proteger a integridade dos IDs do nó. + +Se o diretório `/etc/rancher/node` de um agente for removido, ou você desejar se juntar novamente a um nó usando um nome existente, o nó deve ser excluído do cluster. Isso limpará tanto a entrada do nó antigo quanto o segredo da senha do nó e permitirá que o nó (re)junte-se ao cluster. + +Se você reutiliza nomes de host com frequência, mas não consegue remover os segredos de senha do nó, um ID de nó exclusivo pode ser automaticamente anexado ao nome de host ao iniciar servidores ou agentes K3s usando o sinalizador `--with-node-id`. Quando habilitado, o ID do nó também é armazenado em `/etc/rancher/node/`. diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/agent.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/agent.md new file mode 100644 index 000000000..073df022c --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/agent.md @@ -0,0 +1,174 @@ +--- +title: agente +--- + +# K3s Agente + +Nesta seção, você aprenderá como configurar o agente K3s. + +Observe que os servidores também executam um agente, portanto, todos os sinalizadores listados nesta página também são válidos para uso em servidores. + +As opções são documentadas nesta página como sinalizadores CLI, mas também podem ser passadas como opções de arquivo de configuração. Veja a documentação do [Arquivo de configuração](../installation/configuration.md#configuration-file) para mais informações sobre o uso de arquivos de configuração YAML. 
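+
+Por exemplo, um esboço (com valores hipotéticos) mostrando a equivalência entre sinalizadores CLI e o arquivo de configuração:
+
+```yaml
+# /etc/rancher/k3s/config.yaml
+# Equivalente a: k3s agent --server https://meu-servidor:6443 --token <token> --node-label foo=bar
+server: https://meu-servidor:6443
+token: <token>
+node-label:
+  - foo=bar
+```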
+
+### Logging
+
+| Flag | Valor Padrão | Descrição |
+| ---- | ------------ | --------- |
+| `-v` value | 0 | Número para a verbosidade do nível de log |
+| `--vmodule` value | N/A | Lista separada por vírgulas de configurações FILE_PATTERN=LOG_LEVEL para registro filtrado por arquivo |
+| `--log value, -l` value | N/A | Registrar em arquivo |
+| `--alsologtostderr` | N/A | Registrar no erro padrão e também no arquivo (se definido) |
+
+### Opções de Cluster
+
+| Flag | Variável de Ambiente | Descrição |
+| ---- | -------------------- | --------- |
+| `--token value, -t` value | `K3S_TOKEN` | Token a ser usado para autenticação |
+| `--token-file` value | `K3S_TOKEN_FILE` | Arquivo de token a ser usado para autenticação |
+| `--server value, -s` value | `K3S_URL` | Servidor ao qual se conectar |
+
+### Listener
+
+| Flag | Valor Padrão | Descrição |
+| ---- | ------------ | --------- |
+| `--bind-address` | 0.0.0.0 | Endereço de ligação do k3s |
+
+### Data
+
+| Flag | Valor Padrão | Descrição |
+| ---- | ------------ | --------- |
+| `--data-dir value, -d` value | "/var/lib/rancher/k3s" | Pasta para armazenar o estado |
+
+### Nó
+
+| Flag | Variável de Ambiente | Descrição |
+| ---- | -------------------- | --------- |
+| `--node-name` value | `K3S_NODE_NAME` | Nome do nó |
+| `--with-node-id` | N/A | Adicionar ID ao nome do nó |
+| `--node-label` value | N/A | Registrar e iniciar o kubelet com o conjunto de rótulos |
+| `--node-taint` value | N/A | Registrar o kubelet com o conjunto de taints |
+| `--protect-kernel-defaults` | N/A | Comportamento de ajuste do kernel. Se definido, gera erro se os ajustes do kernel forem diferentes dos padrões do kubelet. |
+| `--selinux` | `K3S_SELINUX` | Habilitar SELinux no containerd |
+| `--lb-server-port` value | `K3S_LB_SERVER_PORT` | Porta local para o balanceador de carga do cliente do supervisor. Se o supervisor e o apiserver não estiverem colocalizados, uma porta adicional (esta porta menos 1) também será usada para o balanceador de carga do cliente do apiserver. (padrão: 6444) |
+
+### Runtime
+
+| Flag | Valor Padrão | Descrição |
+| ---- | ------------ | --------- |
+| `--container-runtime-endpoint` | N/A | Desabilita o containerd incorporado e usa o soquete CRI no caminho fornecido; quando usado com --docker, define o caminho do soquete do docker |
+| `--default-runtime` | N/A | Define o tempo de execução padrão no containerd |
+| `--image-service-endpoint` | N/A | Desabilita o serviço de imagem do containerd incorporado e usa o soquete de serviço de imagem remoto no caminho fornecido. Se não for especificado, o padrão é --container-runtime-endpoint. |
+| `--pause-image` value | "docker.io/rancher/pause:3.1" | Imagem de pausa personalizada para o sandbox do containerd ou do docker |
+| `--private-registry` value | "/etc/rancher/k3s/registries.yaml" | Arquivo de configuração de registro privado |
+
+### Rede
+
+| Flag | Variável de Ambiente | Descrição |
+| ---- | -------------------- | --------- |
+| `--node-ip value, -i` value | N/A | Endereço IP para anunciar o nó |
+| `--node-external-ip` value | N/A | Endereço IP externo para anunciar o nó |
+| `--node-internal-dns` | N/A | Endereços DNS internos para anunciar o nó |
+| `--node-external-dns` | N/A | Endereços DNS externos para anunciar o nó |
+| `--resolv-conf` value | `K3S_RESOLV_CONF` | Arquivo resolv.conf do Kubelet |
+| `--flannel-iface` value | N/A | Substituir a interface flannel padrão |
+| `--flannel-conf` value | N/A | Substituir o arquivo de configuração flannel padrão |
+| `--flannel-cni-conf` value | N/A | Substituir o arquivo de configuração CNI do flannel padrão |
+
+### Flags Customizadas
+
+| Flag | Descrição |
+| ---- | --------- |
+| `--kubelet-arg` value | Sinalizador personalizado para o processo kubelet |
+| `--kube-proxy-arg` value | Sinalizador personalizado para o processo kube-proxy |
+
+### Experimental
+
+| Flag | Descrição |
+| ---- | --------- |
+| `--rootless` | Executar em modo rootless |
+| `--docker` | Usar cri-dockerd em vez de containerd |
+| `--enable-pprof` | Habilitar o endpoint pprof na porta do supervisor |
+| `--prefer-bundled-bin` | Preferir binários de espaço de usuário empacotados em vez de binários do host |
+| `--disable-default-registry-endpoint` | Consulte "[Default Endpoint Fallback](../installation/private-registry.md#default-endpoint-fallback)" |
+| `--vpn-auth` | Consulte "[Integration with the Tailscale VPN provider](../networking/distributed-multicloud.md#integration-with-the-tailscale-vpn-provider-experimental)" |
+| `--vpn-auth-file` | Consulte "[Integration with the Tailscale VPN provider](../networking/distributed-multicloud.md#integration-with-the-tailscale-vpn-provider-experimental)" |
+
+### Depreciado
+
+| Flag | Variável de Ambiente | Descrição |
+| ---- | -------------------- | --------- |
+| `--no-flannel` | N/A | Use `--flannel-backend=none` |
+| `--cluster-secret` value | `K3S_CLUSTER_SECRET` | Use `--token` |
+
+### Rótulos de Nós e Taints para Agentes
+
+Os agentes K3s podem ser configurados com as opções `--node-label` e `--node-taint`, que adicionam um rótulo e um taint ao kubelet. As duas opções apenas adicionam rótulos e/ou taints no momento do registro, então só podem ser adicionados uma vez, e não podem ser alterados depois disso executando novamente comandos do K3s.
+
+Abaixo está um exemplo mostrando como adicionar rótulos e um taint:
+
+```bash
+     --node-label foo=bar \
+     --node-label hello=world \
+     --node-taint key1=value1:NoExecute
+```
+
+Se você quiser alterar os rótulos e taints dos nós após o registro do nó, você deve usar `kubectl`.
+Consulte a documentação oficial do Kubernetes para obter detalhes sobre como adicionar [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) e [rótulos aos nós](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node).
+
+### Ajuda da CLI do Agente K3s
+
+> Se uma opção aparecer entre colchetes abaixo, por exemplo `[$K3S_URL]`, significa que a opção pode ser passada como uma variável de ambiente com esse nome.
+
+```bash
+NAME:
+   k3s agent - Run node agent
+
+USAGE:
+   k3s agent [OPTIONS]
+
+OPTIONS:
+   --config FILE, -c FILE  (config) Load configuration from FILE (default: "/etc/rancher/k3s/config.yaml") [$K3S_CONFIG_FILE]
+   --debug  (logging) Turn on debug logs [$K3S_DEBUG]
+   -v value  (logging) Number for the log level verbosity (default: 0)
+   --vmodule value  (logging) Comma-separated list of FILE_PATTERN=LOG_LEVEL settings for file-filtered logging
+   --log value, -l value  (logging) Log to file
+   --alsologtostderr  (logging) Log to standard error as well as file (if set)
+   --token value, -t value  (cluster) Token to use for authentication [$K3S_TOKEN]
+   --token-file value  (cluster) Token file to use for authentication [$K3S_TOKEN_FILE]
+   --server value, -s value  (cluster) Server to connect to [$K3S_URL]
+   --data-dir value, -d value  (agent/data) Folder to hold state (default: "/var/lib/rancher/k3s") [$K3S_DATA_DIR]
+   --node-name value  (agent/node) Node name [$K3S_NODE_NAME]
+   --with-node-id  (agent/node) Append id to node name
+   --node-label value  (agent/node) Registering and starting kubelet with set of labels
+   --node-taint value  (agent/node) Registering kubelet with set of taints
+   --image-credential-provider-bin-dir value  (agent/node) The path to the directory where credential provider plugin binaries are located (default: "/var/lib/rancher/credentialprovider/bin")
+   --image-credential-provider-config value  (agent/node) The path to the credential provider plugin config file (default: "/var/lib/rancher/credentialprovider/config.yaml")
+   --selinux  (agent/node) Enable SELinux in containerd [$K3S_SELINUX]
+   --lb-server-port value  (agent/node) Local port for supervisor client load-balancer. If the supervisor and apiserver are not colocated an additional port 1 less than this port will also be used for the apiserver client load-balancer. (default: 6444) [$K3S_LB_SERVER_PORT]
+   --protect-kernel-defaults  (agent/node) Kernel tuning behavior. If set, error if kernel tunables are different than kubelet defaults.
+   --container-runtime-endpoint value  (agent/runtime) Disable embedded containerd and use the CRI socket at the given path; when used with --docker this sets the docker socket path
+   --default-runtime value  (agent/runtime) Set the default runtime in containerd
+   --image-service-endpoint value  (agent/runtime) Disable embedded containerd image service and use remote image service socket at the given path. If not specified, defaults to --container-runtime-endpoint.
+   --pause-image value  (agent/runtime) Customized pause image for containerd or docker sandbox (default: "rancher/mirrored-pause:3.6")
+   --snapshotter value  (agent/runtime) Override default containerd snapshotter (default: "overlayfs")
+   --private-registry value  (agent/runtime) Private registry configuration file (default: "/etc/rancher/k3s/registries.yaml")
+   --disable-default-registry-endpoint  (agent/containerd) Disables containerd fallback default registry endpoint when a mirror is configured for that registry
+   --nonroot-devices  (agent/containerd) Allows non-root pods to access devices by setting device_ownership_from_security_context=true in the containerd CRI config
+   --node-ip value, -i value  (agent/networking) IPv4/IPv6 addresses to advertise for node
+   --bind-address value  (listener) k3s bind address (default: 0.0.0.0)
+   --node-external-ip value  (agent/networking) IPv4/IPv6 external IP addresses to advertise for node
+   --node-internal-dns value  (agent/networking) internal DNS addresses to advertise for node
+   --node-external-dns value  (agent/networking) external DNS addresses to advertise for node
+   --resolv-conf value  (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF]
+   --flannel-iface value  (agent/networking) Override default flannel interface
+   --flannel-conf value  (agent/networking) Override default flannel config file
+   --flannel-cni-conf value  (agent/networking) Override default flannel cni config file
+   --kubelet-arg value  (agent/flags) Customized flag for kubelet process
+   --kube-proxy-arg value  (agent/flags) Customized flag for kube-proxy process
+   --enable-pprof  (experimental) Enable pprof endpoint on supervisor port
+   --rootless  (experimental) Run rootless
+   --prefer-bundled-bin  (experimental) Prefer bundled userspace binaries over host binaries
+   --docker  (agent/runtime) (experimental) Use cri-dockerd instead of containerd
+   --vpn-auth value  (agent/networking) (experimental) Credentials for the VPN provider. It must include the provider name and join key in the format name=<provider>,joinKey=<key>[,controlServerURL=<url>][,extraArgs=<args>] [$K3S_VPN_AUTH]
+   --vpn-auth-file value  (agent/networking) (experimental) File containing credentials for the VPN provider. It must include the provider name and join key in the format name=<provider>,joinKey=<key>[,controlServerURL=<url>][,extraArgs=<args>] [$K3S_VPN_AUTH_FILE]
+   --disable-apiserver-lb  (agent/networking) (experimental) Disable the agent client-side load-balancer and connect directly to the configured server address
+```
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/certificate.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/certificate.md
new file mode 100644
index 000000000..f7a539e33
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/certificate.md
@@ -0,0 +1,321 @@
+---
+title: certificado
+---
+
+# K3s Certificado
+
+## Certificados de Cliente e Servidor
+
+Os certificados de cliente e servidor do K3s são válidos por 365 dias a partir da data de emissão. Quaisquer certificados que estejam expirados, ou dentro de 90 dias de expiração, são renovados automaticamente toda vez que o K3s é iniciado.
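+
+Para inspecionar manualmente a validade de um certificado emitido, um esboço ilustrativo (assumindo o diretório de dados padrão):
+
+```bash
+# Mostra a data de expiração do certificado de serviço do apiserver
+openssl x509 -noout -enddate -in /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt
+```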
+
+### Rotacionando Certificados de Cliente e Servidor
+
+Para rotacionar certificados de cliente e servidor manualmente, use o subcomando `k3s certificate rotate`:
+
+```bash
+# Pare o K3s
+systemctl stop k3s
+
+# Rotacione os certificados
+k3s certificate rotate
+
+# Inicie o K3s
+systemctl start k3s
+```
+
+Certificados individuais ou listas de certificados podem ser rotacionados especificando o nome do certificado:
+
+```bash
+k3s certificate rotate --service <service>,<service>
+```
+
+Os seguintes certificados podem ser rotacionados: `admin`, `api-server`, `controller-manager`, `scheduler`, `k3s-controller`, `k3s-server`, `cloud-controller`, `etcd`, `auth-proxy`, `kubelet`, `kube-proxy`.
+
+## Certificados de Autoridade Certificadora (CA)
+
+O Kubernetes requer uma série de certificados CA para a operação adequada. Para obter mais informações sobre como o Kubernetes usa certificados CA, consulte a documentação [Certificados e requisitos de PKI](https://kubernetes.io/docs/setup/best-practices/certificates/#all-certificates) do Kubernetes.
+
+Por padrão, o K3s gera certificados CA autoassinados durante a inicialização do primeiro nó do servidor. Esses certificados CA são válidos por 10 anos a partir da data de emissão e não são renovados automaticamente.
+
+Os certificados e chaves de CA autoritativos são armazenados na chave de bootstrap do armazenamento de dados, criptografados usando o [token do servidor](token.md#server) como senha PBKDF2, com AES256-GCM e HMAC-SHA1.
+Cópias dos certificados e chaves da CA são extraídas para o disco durante a inicialização do servidor K3s.
+Qualquer servidor pode gerar certificados folha para os nós à medida que eles se juntam ao cluster, e os controladores da [API de certificados](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/) do Kubernetes podem emitir certificados adicionais em tempo de execução.
+
+Para rotacionar certificados e chaves de CA, use o comando `k3s certificate rotate-ca`.
+O comando executa verificações de integridade para confirmar que os certificados e chaves atualizados são utilizáveis.
+Se os dados atualizados forem aceitáveis, a chave de bootstrap criptografada do datastore será atualizada, e os novos certificados e chaves serão usados na próxima vez que o K3s for iniciado.
+Se forem encontrados problemas durante a validação dos certificados e chaves, um erro será relatado no log do sistema e a operação será cancelada sem alterações.
+
+:::info Nota de Versão
+O suporte ao comando `k3s certificate rotate-ca` e a capacidade de usar certificados de CA assinados por uma CA externa estão disponíveis a partir das versões 2023-02 (v1.26.2+k3s1, v1.25.7+k3s1, v1.24.11+k3s1, v1.23.17+k3s1).
+:::
+
+### Usando Certificados CA Customizados
+
+Se os certificados e chaves da CA forem encontrados no local correto durante a inicialização do primeiro servidor do cluster, a geração automática de certificados da CA será ignorada.
+
+Um script de exemplo para pré-criar os certificados e chaves apropriados está disponível [no repositório K3s em `contrib/util/generate-custom-ca-certs.sh`](https://github.com/k3s-io/k3s/blob/master/contrib/util/generate-custom-ca-certs.sh).
+Este script deve ser executado antes de iniciar o K3s pela primeira vez e criará um conjunto completo de certificados CA folha assinados por certificados CA raiz e intermediário comuns.
+Se você tiver uma CA raiz ou intermediária existente, este script pode ser usado (ou usado como um ponto de partida) para criar os certificados CA corretos para provisionar um cluster K3s com PKI enraizada em uma autoridade existente.
+
+Arquivos de Autoridade de Certificação Personalizados devem ser colocados em `/var/lib/rancher/k3s/server/tls`. Os seguintes arquivos são necessários:
+* `server-ca.crt`
+* `server-ca.key`
+* `client-ca.crt`
+* `client-ca.key`
+* `request-header-ca.crt`
+* `request-header-ca.key`
+  *// nota: arquivos etcd são necessários mesmo se o etcd incorporado não estiver em uso.*
+* `etcd/peer-ca.crt`
+* `etcd/peer-ca.key`
+* `etcd/server-ca.crt`
+* `etcd/server-ca.key`
+  *// nota: esta é a chave privada usada para assinar tokens de conta de serviço. Ela não tem um certificado correspondente.*
+* `service.key`
+
+#### Topologia CA Customizada
+
+Os certificados CA customizados devem observar a seguinte topologia:
+
+```mermaid
+graph TD
+    root("Root CA")
+    intermediate("Intermediate CA")
+    server-ca("Server CA")
+    client-ca("Client CA")
+    request-header-ca("API Aggregation CA")
+    etcd-peer-ca("etcd Peer CA")
+    etcd-server-ca("etcd Server CA")
+
+    root-hash>"Join token CA hash"]
+
+    kube-server-certs[["Kubernetes servers<br>(control-plane and kubelet listeners)"]]
+    kube-client-certs[["Kubernetes clients<br>(apiserver and kubelet clients)"]]
+    request-header-certs[["Kubernetes API aggregation<br>(apiserver proxy client)"]]
+    etcd-peer-certs[["etcd peer client/server<br>(etcd replication)"]]
+    etcd-server-certs[["etcd client/server certificates<br>(Kubernetes <-> etcd)"]]
+
+    root -.-|SHA256| root-hash
+    root ---> intermediate
+    intermediate --> server-ca ==> kube-server-certs
+    intermediate --> client-ca ==> kube-client-certs
+    intermediate --> request-header-ca ==> request-header-certs
+    intermediate --> etcd-peer-ca ==> etcd-peer-certs
+    intermediate --> etcd-server-ca ==> etcd-server-certs
+```
+
+#### Usando o Script de Exemplo
+
+:::info Importante
+Se você quiser assinar os certificados de CA do cluster com uma CA raiz existente usando o script de exemplo, você deve colocar os arquivos raiz e intermediários no diretório de destino antes de executar o script.
+Se os arquivos não existirem, o script criará novos certificados de CA raiz e intermediários.
+:::
+
+Se você quiser usar apenas um certificado de CA raiz existente, forneça os seguintes arquivos:
+* `root-ca.pem`
+* `root-ca.key`
+
+Se você quiser usar certificados de CA raiz e intermediários existentes, forneça os seguintes arquivos:
+* `root-ca.pem`
+* `intermediate-ca.pem`
+* `intermediate-ca.key`
+
+Para usar o script de exemplo para gerar certificados e chaves personalizados antes de iniciar o K3s, execute os seguintes comandos:
+```bash
+# Crie o diretório de destino para a geração de certificados.
+mkdir -p /var/lib/rancher/k3s/server/tls
+
+# Copie seu certificado CA raiz e o certificado+chave da CA intermediária para o local correto para o script.
+# Para os propósitos deste exemplo, assumimos que você tem arquivos de CA raiz e intermediária existentes em /etc/ssl.
+# Se você não tiver uma CA raiz e/ou intermediária existente, o script irá gerá-las para você.
+cp /etc/ssl/certs/root-ca.pem /etc/ssl/certs/intermediate-ca.pem /etc/ssl/private/intermediate-ca.key /var/lib/rancher/k3s/server/tls
+
+# Gere certificados e chaves de CA personalizados.
+curl -sL https://github.com/k3s-io/k3s/raw/master/contrib/util/generate-custom-ca-certs.sh | bash -
+```
+
+Se o comando for concluído com sucesso, você pode instalar e/ou iniciar o K3s pela primeira vez.
+Se o script gerou arquivos de CA raiz e/ou intermediária, você deve fazer backup desses arquivos para que eles possam ser reutilizados se for necessário rotacionar os certificados CA posteriormente.
+
+### Rotacionando Certificados CA Customizados
+
+Para rotacionar certificados CA personalizados, use o subcomando `k3s certificate rotate-ca`.
+Os arquivos atualizados devem ser preparados em um diretório temporário, carregados no datastore, e o k3s deve ser reiniciado em todos os nós para usar os certificados atualizados.
+
+:::warning
+Você não deve sobrescrever os dados em uso no momento em `/var/lib/rancher/k3s/server/tls`.
+Coloque os certificados e chaves atualizados em um diretório separado.
+:::
+
+Um cluster que foi iniciado com certificados de CA personalizados pode renovar ou rotacionar os certificados e chaves de CA sem interrupções, desde que a mesma CA raiz seja usada.
+
+Se uma nova CA raiz for necessária, a rotação será disruptiva. A opção `k3s certificate rotate-ca --force` deve ser usada, todos os nós que foram unidos com um [token seguro](token.md#secure) (incluindo servidores) precisarão ser reconfigurados para usar o novo valor do token, e os pods precisarão ser reiniciados para confiar na nova CA raiz.
+
+#### Usando o Script de Exemplo
+
+O script de exemplo `generate-custom-ca-certs.sh` vinculado acima também pode ser usado para gerar certificados atualizados em um novo diretório temporário, copiando os arquivos para o local correto e definindo a variável de ambiente `DATA_DIR`.
+Para usar o script de exemplo para gerar certificados e chaves atualizados, execute os seguintes comandos: +```bash +# Crie um diretório temporário para geração de certificado. +mkdir -p /opt/k3s/server/tls + +# Copie seu certificado CA raiz e o certificado CA intermediário+chave no local correto para o script. +# A rotação não disruptiva requer o mesmo CA raiz que foi usado para gerar os certificados originais. +# Se os arquivos originais ainda estiverem no diretório de dados, você pode simplesmente executar: +cp /var/lib/rancher/k3s/server/tls/root-ca.* /var/lib/rancher/k3s/server/tls/intermediate-ca.* /opt/k3s/server/tls + +# Copie a chave de assinatura da conta de serviço atual para que os tokens de conta de serviço existentes não sejam invalidados. +cp /var/lib/rancher/k3s/server/tls/service.key /opt/k3s/server/tls + +# Gere certificados e chaves de CA personalizados e atualizados. +curl -sL https://github.com/k3s-io/k3s/raw/master/contrib/util/generate-custom-ca-certs.sh | DATA_DIR=/opt/k3s bash - + +# Carregue os certificados e chaves de CA atualizados no armazenamento de dados. +k3s certificate rotate-ca --path=/opt/k3s/server +``` + +Se o comando `rotate-ca` retornar um erro, verifique o log de serviço para erros. +Se o comando for concluído com sucesso, reinicie o K3s em todos os nós no cluster - servidores primeiro, depois agentes. + +Se você usou a opção `--force` ou alterou a CA raiz, certifique-se de que todos os nós que foram unidos com um [token seguro](token.md#secure) sejam reconfigurados para usar o novo valor de token, antes de serem reiniciados. +O token pode ser armazenado em um arquivo `.env`, unidade systemd ou config.yaml, dependendo de como o nó foi configurado durante a instalação inicial. + +### Rotacionando Certificados CA Autoassinados + +Para rotacionar os certificados CA autoassinados gerados pelo K3s, use o subcomando `k3s certificate rotate-ca`. +Os arquivos atualizados devem ser preparados em um diretório temporário, carregados no datastore e o k3s deve ser reiniciado em todos os nós para usar os certificados atualizados. + +:::warning +Você não deve sobrescrever os dados em uso no momento em `/var/lib/rancher/k3s/server/tls`. +Coloque os certificados e chaves atualizados em um diretório separado. +::: + +Se o cluster tiver sido iniciado com certificados CA autoassinados padrão, a rotação será disruptiva. Todos os nós que foram unidos com um [token seguro](token.md#secure) precisarão ser reconfigurados para confiar no novo hash CA. +Se os novos certificados CA não forem assinados de forma cruzada pelos antigos certificados CA, você precisará usar a opção `--force` para ignorar as verificações de integridade, e os pods precisarão ser reiniciados para confiar no novo CA raiz. + +#### Topologia CA Padrão +Os certificados CA autoassinados padrão têm a seguinte topologia: + +```mermaid +graph TD + server-ca("Server CA") + client-ca("Client CA") + request-header-ca("API Aggregation CA") + etcd-peer-ca("etcd Peer CA") + etcd-server-ca("etcd Server CA") + + root-hash>"Join token CA hash"] + + kube-server-certs[["Kubernetes servers
(control-plane and kubelet listeners)"]] + kube-client-certs[["Kubernetes clients
(apiserver and kubelet clients)"]] + request-header-certs[["Kubernetes API aggregation
(apiserver proxy client)"]] + etcd-peer-certs[["etcd peer client/server
(etcd replication)"]] + etcd-server-certs[["etcd client/server certificates
(Kubernetes <-> etcd)"]] + + server-ca -.-|SHA256| root-hash + server-ca ===> kube-server-certs + client-ca ===> kube-client-certs + request-header-ca ===> request-header-certs + etcd-peer-ca ===> etcd-peer-certs + etcd-server-ca ===> etcd-server-certs +``` + +Ao rotacionar as CAs autoassinadas padrão, uma topologia de certificado modificada com CAs intermediárias e uma nova CA raiz assinada pela CA antiga pode ser usada para que haja uma cadeia contínua de confiança entre as CAs antigas e novas: +```mermaid +graph TD + server-ca-old("Server CA
(old)") + client-ca-old("Client CA
(old)") + request-header-ca-old("API Aggregation CA
(old)") + etcd-peer-ca-old("etcd Peer CA
(old)") + etcd-server-ca-old("etcd Server CA
(old)") + + root-hash>"Join token CA hash"] + + server-ca-xsigned("Server CA
(cross-signed)") + client-ca-xsigned("Client CA
(cross-signed)") + request-header-ca-xsigned("API Aggregation CA
(cross-signed)") + etcd-peer-ca-xsigned("etcd Peer CA
(cross-signed)") + etcd-server-ca-xsigned("etcd Server CA
(cross-signed)") + + server-ca-ssigned("Server CA
(self-signed)") + client-ca-ssigned("Client CA
(self-signed)") + request-header-ca-ssigned("API Aggregation CA
(self-signed)") + etcd-peer-ca-ssigned("etcd Peer CA
(self-signed)") + etcd-server-ca-ssigned("etcd Server CA
(self-signed)") + + server-ca("Intermediate
Server CA") + client-ca("Intermediate
Client CA") + request-header-ca("Intermediate
API Aggregation CA") + etcd-peer-ca("Intermediate
etcd Peer CA") + etcd-server-ca("Intermediate
etcd Server CA") + + kube-server-certs[["Kubernetes servers
(control-plane and kubelet listeners)"]] + kube-client-certs[["Kubernetes clients
(apiserver and kubelet clients)"]] + request-header-certs[["Kubernetes API aggregation
(apiserver proxy client)"]] + etcd-peer-certs[["etcd peer client/server
(etcd replication)"]] + etcd-server-certs[["etcd client/server certificates
(Kubernetes <-> etcd)"]] + + server-ca-ssigned -.-|SHA256| root-hash + server-ca-ssigned --> server-ca ==> kube-server-certs + server-ca-old --> server-ca-xsigned --> server-ca + client-ca-ssigned --> client-ca ==> kube-client-certs + client-ca-old --> client-ca-xsigned --> client-ca + request-header-ca-ssigned --> request-header-ca ==> request-header-certs + request-header-ca-old --> request-header-ca-xsigned --> request-header-ca + etcd-peer-ca-ssigned --> etcd-peer-ca ==> etcd-peer-certs + etcd-peer-ca-old --> etcd-peer-ca-xsigned --> etcd-peer-ca + etcd-server-ca-ssigned --> etcd-server-ca ==> etcd-server-certs + etcd-server-ca-old --> etcd-server-ca-xsigned --> etcd-server-ca +``` + +#### Usando o Script de Exemplo + +Um script de exemplo para criar certificados CA atualizados e chaves assinadas cruzadamente pelas CAs existentes está disponível [no repositório K3s em `contrib/util/rotate-default-ca-certs.sh`](https://github.com/k3s-io/k3s/blob/master/contrib/util/rotate-default-ca-certs.sh). + +Para usar o script de exemplo para gerar certificados autoassinados atualizados que são assinados de forma cruzada pelas CAs existentes, execute os seguintes comandos: +```bash +# Crie certificados e chaves CA atualizados, assinados de forma cruzada pelos CAs atuais. +# Este script criará um novo diretório temporário contendo os certificados atualizados e produzirá os novos valores de token. +curl -sL https://github.com/k3s-io/k3s/raw/master/contrib/util/rotate-default-ca-certs.sh | bash - + +# Carregue os certificados atualizados no armazenamento de dados; veja a saída do script para os valores de token atualizados. +k3s certificate rotate-ca --path=/var/lib/rancher/k3s/server/rotate-ca +``` + +Se o comando `rotate-ca` retornar um erro, verifique se há erros no log de serviço. +Se o comando for concluído com sucesso, reinicie o K3s em todos os nós do cluster - servidores primeiro, depois agentes. + +Certifique-se de que todos os nós que foram unidos com um [token seguro](token.md#secure), incluindo outros nós de servidor, sejam reconfigurados para usar o novo valor de token antes de serem reiniciados. +O token pode ser armazenado em um arquivo `.env`, unidade systemd ou config.yaml, dependendo de como o nó foi configurado durante a instalação inicial. + +## Rotação de Chaves do Emissor da Conta de Serviço + +A chave do emissor da conta de serviço é uma chave privada RSA usada para assinar tokens de conta de serviço. +Ao rotacionar a chave do emissor da conta de serviço, pelo menos uma chave antiga deve ser mantida no arquivo para que os tokens de conta de serviço existentes não sejam invalidados. +Ela pode ser rotacionada independentemente das CAs do cluster usando o `k3s certificate rotate-ca` para instalar apenas um arquivo `service.key` atualizado que inclua as chaves novas e antigas. + +:::warning +Você não deve sobrescrever os dados em uso no momento em `/var/lib/rancher/k3s/server/tls`. +Coloque a chave atualizada em um diretório separado. 
+::: + +Por exemplo, para girar apenas a chave do emissor da conta de serviço, execute os seguintes comandos: +```bash +# Crie um diretório temporário para geração de certificado +mkdir -p /opt/k3s/server/tls + +# Verifique a versão do OpenSSL +openssl version | grep -qF 'OpenSSL 3' && OPENSSL_GENRSA_FLAGS=-traditional + +# Gerar uma nova chave +openssl genrsa ${OPENSSL_GENRSA_FLAGS:-} -out /opt/k3s/server/tls/service.key 2048 + +# Adicione a chave existente para evitar invalidar os tokens atuais +cat /var/lib/rancher/k3s/server/tls/service.key >> /opt/k3s/server/tls/service.key + +# Carregue a chave atualizada no armazenamento de dados +k3s certificate rotate-ca --path=/opt/k3s/server +``` + +É normal ver avisos para arquivos que não estão sendo atualizados. Se o comando `rotate-ca` retornar um erro, verifique o log de serviço para erros. +Se o comando for concluído com sucesso, reinicie o K3s em todos os servidores no cluster. Não é necessário reiniciar agentes ou reiniciar pods. \ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/cli.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/cli.md new file mode 100644 index 000000000..74e4095b0 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/cli.md @@ -0,0 +1,19 @@ +--- +title: Ferramentas CLI +--- + +O binário K3s contém uma série de ferramentas adicionais que ajudam você a gerenciar seu cluster. + +| Comando | Descrição | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `k3s server` | Execute um nó de servidor K3s, que inicia os componentes `apiserver`, `scheduler`, `controller-manager` e `cloud-controller-manager` do Kubernetes, além de um datastore e os componentes do agente. Veja a [documentação do comando `k3s server`](server.md) para mais informações. | +| `k3s agent` | Execute o nó do agente K3s, que inicia o controlador de política de rede `containerd`, `flannel`, `kube-router` e os componentes `kubelet` e `kube-proxy` do Kubernetes. Veja a [documentação do comando `k3s agent`](agent.md) para obter mais informações. | +| `k3s kubectl` | Execute o comando incorporado [`kubectl`](https://kubernetes.io/docs/reference/kubectl). Esta é uma CLI para interagir com o apiserver do Kubernetes. Se a variável de ambiente `KUBECONFIG` não estiver definida, isso tentará usar automaticamente o kubeconfig em `/etc/rancher/k3s/k3s.yaml`. | +| `k3s crictl` | Execute o comando incorporado [`crictl`](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md). Esta é uma CLI para interagir com a interface de tempo de execução do contêiner (CRI) do Kubernetes. Útil para depuração. | +| `k3s ctr` | Execute o comando incorporado [`ctr`](https://github.com/projectatomic/containerd/blob/master/docs/cli.md). Este é um CLI para containerd, o daemon de contêiner usado pelo K3s. Útil para depuração. | +| `k3s token` | Gerenciar tokens bootstrap. Veja a [documentação do comando `k3s token`](token.md) para mais informações. | +| `k3s etcd-snapshot` | Execute backups sob demanda dos dados do cluster K3s e carregue no S3. Veja a [documentação do comando `k3s etcd-snapshot`](etcd-snapshot.md) para mais informações. 
| +| `k3s secrets-encrypt` | Configure o K3s para criptografar segredos ao armazená-los no cluster. Veja a [documentação do comando `k3s secrets-encrypt`](secrets-encrypt.md) para mais informações. | +| `k3s certificate` | Gerenciar certificados K3s. Veja a [documentação do comando `k3s certificate`](certificate.md) para mais informações. | +| `k3s completion` | Gerar scripts de conclusão de shell para k3s | +| `k3s help` | Mostra uma lista de comandos ou ajuda para um comando | diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/etcd-snapshot.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/etcd-snapshot.md new file mode 100644 index 000000000..19f827a73 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/etcd-snapshot.md @@ -0,0 +1,307 @@ +--- +title: etcd-snapshot +--- + +# k3s etcd-snapshot + +Esta página documenta o gerenciamento de snapshots do etcd usando a CLI `k3s etcd-snapshot`, bem como a configuração de snapshots agendados do etcd para o processo `k3s server` e o uso do comando `k3s server --cluster-reset` para redefinir a associação do cluster etcd e, opcionalmente, restaurar snapshots do etcd. + +## Criando Snapshots + +Os snapshots são salvos no caminho definido pelo valor `--etcd-snapshot-dir` do servidor, que tem como padrão `${data-dir}/server/db/snapshots`. O valor data-dir tem como padrão `/var/lib/rancher/k3s` e pode ser alterado independentemente definindo o sinalizador `--data-dir`. + +### Snapshots Agendados + +Os snapshots agendados são habilitados por padrão, às 00:00 e 12:00, horário do sistema, com 5 snapshots retidos. Para configurar o intervalo de snapshot ou o número de snapshots retidos, consulte as [opções de configuração de snapshot](#snapshot-configuration-options). + +Os snapshots agendados têm um nome que começa com `etcd-snapshot`, seguido pelo nome do nó e timestamp. O nome base pode ser alterado com o sinalizador `--etcd-snapshot-name` na configuração do servidor. + +### Snapshots Sob Demanda + +Os snapshots podem ser salvos manualmente executando o comando `k3s etcd-snapshot save`. + +Os snapshots sob demanda têm um nome que começa com `on-demand`, seguido pelo nome do nó e pelo registro de data e hora. O nome base pode ser alterado com o sinalizador `--name` ao salvar o snapshot. + +### Opções de Configuração do Snapshot + +Esses sinalizadores podem ser passados ​​para o comando `k3s server` para redefinir o cluster etcd e, opcionalmente, restaurar a partir de um snapshot. + +| Flag | Descrição | +| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | +| `--cluster-reset` | Esqueça todos os pares e torne-se o único membro de um novo cluster. Isso também pode ser definido com a variável de ambiente `[$K3S_CLUSTER_RESET]` | +| `--cluster-reset-restore-path` | Caminho para o arquivo de instantâneo a ser restaurado | + +Esses sinalizadores são válidos para `k3s server` e `k3s etcd-snapshot`, no entanto, quando passados ​​para `k3s etcd-snapshot`, o prefixo `--etcd-` pode ser omitido para evitar redundância. +Os sinalizadores podem ser passados ​​com a linha de comando ou no [arquivo de configuração,](../installation/configuration.md#configuration-file ) que pode ser mais fácil de usar. 
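A título de ilustração, segue um esboço mínimo e hipotético de `/etc/rancher/k3s/config.yaml`: cada sinalizador da tabela a seguir vira uma chave YAML sem o prefixo `--`, e os valores abaixo são apenas exemplos, não recomendações. + +```yaml +# Trecho ilustrativo de /etc/rancher/k3s/config.yaml (valores hipotéticos) +etcd-snapshot-schedule-cron: "0 */6 * * *" # um snapshot a cada 6 horas +etcd-snapshot-retention: 10 # reter os 10 snapshots mais recentes +etcd-snapshot-compress: true # compactar os snapshots gerados +``` +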
+ +| Flag | Descrição | +| ------------------------------- | ------------------------------------------------------------------------------------------------------ | +| `--etcd-disable-snapshots` | Desativar snapshots agendados | +| `--etcd-snapshot-compress` | Compactação etcd snapshots | +| `--etcd-snapshot-dir` | Diretório para salvar instantâneos do banco de dados. (Localização padrão: `${data-dir}/db/snapshots`) | +| `--etcd-snapshot-retention` | Número de snapshots para reter (default: 5) | +| `--etcd-snapshot-schedule-cron` | Tempo de intervalo da execução do snapshot, ex: a cada 5 horas `0 */5 * * *` (default: `0 */12 * * *`) | + +### Suporte de armazenamento de objetos compatível com S3 + +O K3s suporta a gravação de snapshots etcd e a restauração de snapshots etcd de armazenamentos de objetos compatíveis com S3. O suporte S3 está disponível para snapshots sob demanda e agendados. + +| Flag | Descrição | +| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| `--etcd-s3` | Habilitar backup para S3 | +| `--etcd-s3-endpoint` | S3 endpoint url | +| `--etcd-s3-endpoint-ca` | Certificado CA customizado para endpoint S3 | +| `--etcd-s3-skip-ssl-verify` | Desativa a validação de certificado SSL com S3 | +| `--etcd-s3-access-key` | Chave de acesso S3 | +| `--etcd-s3-secret-key` | Segredo de acesso S3 | +| `--etcd-s3-bucket` | Nome do bucket S3 | +| `--etcd-s3-region` | Região S3 / localização do bucket (opcional). O padrão é us-east-1 | +| `--etcd-s3-folder` | Pasta S3 | +| `--etcd-s3-proxy` | Servidor proxy a ser usado ao conectar ao S3, substituindo quaisquer variáveis ​​de ambiente relacionadas ao proxy | +| `--etcd-s3-insecure` | Desabilita S3 sobre HTTPS | +| `--etcd-s3-timeout` | S3 timeout (default: `5m0s`) | +| `--etcd-s3-config-secret` | Secret name no namespace kube-system usado para configurar o S3, se o etcd-s3 estiver habilitado e nenhuma outra opção do etcd-s3 estiver definida | + +Para executar um instantâneo etcd sob demanda e salvá-lo no S3: + +```bash +k3s etcd-snapshot save \ + --s3 \ + --s3-bucket= \ + --s3-access-key= \ + --s3-secret-key= +``` + +Para executar uma restauração de snapshot etcd sob demanda do S3, primeiro certifique-se de que o K3s não esteja em execução. Em seguida, execute os seguintes comandos: + +```bash +k3s server \ + --cluster-init \ + --cluster-reset \ + --etcd-s3 \ + --cluster-reset-restore-path= \ + --etcd-s3-bucket= \ + --etcd-s3-access-key= \ + --etcd-s3-secret-key= +``` + +### Suporte Secret de Configuração S3 + +:::info Nota de Versão +O suporte ao S3 Configuration Secret está disponível a partir das versões de agosto de 2024: v1.30.4+k3s1, v1.29.8+k3s1, v1.28.13+k3s1 +::: + +O K3s suporta a leitura da configuração de snapshot etcd S3 de um Kubernetes Secret. +Isso pode ser preferível à codificação rígida de credenciais em flags CLI do K3s ou arquivos de configuração por motivos de segurança, ou se as credenciais precisarem ser rotacionadas sem reiniciar o K3s. +Para passar a configuração de snapshot S3 por meio de um Secret, inicie o K3s com `--etcd-s3` e `--etcd-s3-config-secret=`. +O Secret não precisa existir quando o K3s é iniciado, mas será verificado sempre que uma operação de salvar/listar/excluir/remoção de snapshot for realizada. + +O S3 config Secret não pode ser usado ao restaurar um snapshot, pois o apiserver não está disponível para fornecer o segredo durante uma restauração. 
A configuração do S3 deve ser passada via CLI ao restaurar um snapshot armazenado no S3. + +:::note +Passe apenas os sinalizadores `--etcd-s3` e `--etcd-s3-config-secret` para habilitar o Secret. +Se quaisquer outros sinalizadores de configuração S3 forem definidos, o Secret será ignorado. +::: + +As chaves no Secret correspondem aos sinalizadores CLI `--etcd-s3-*` listados acima. +A chave `etcd-s3-endpoint-ca` aceita um pacote CA codificado em PEM, ou a chave `etcd-s3-endpoint-ca-name` pode ser usada para especificar o nome de um ConfigMap no namespace `kube-system` contendo um ou mais pacotes CA codificados em PEM. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: k3s-etcd-snapshot-s3-config + namespace: kube-system +type: etcd.k3s.cattle.io/s3-config-secret +stringData: + etcd-s3-endpoint: "" + etcd-s3-endpoint-ca: "" + etcd-s3-endpoint-ca-name: "" + etcd-s3-skip-ssl-verify: "false" + etcd-s3-access-key: "AWS_ACCESS_KEY_ID" + etcd-s3-secret-key: "AWS_SECRET_ACCESS_KEY" + etcd-s3-bucket: "bucket" + etcd-s3-folder: "folder" + etcd-s3-region: "us-east-1" + etcd-s3-insecure: "false" + etcd-s3-timeout: "5m" + etcd-s3-proxy: "" +``` + +## Gerenciando Snapshots + +O k3s suporta um conjunto de subcomandos para trabalhar com seus snapshots do etcd. + +| Subcomando | Descrição | +| ----------- | ---------------------------------------------------------------- | +| delete | Excluir snapshots | +| ls, list, l | Listar snapshots | +| prune | Remover snapshots que excedem a contagem de retenção configurada | +| save | Aciona um snapshot do etcd imediatamente | + +Esses comandos funcionarão conforme o esperado, independentemente de os snapshots etcd serem armazenados localmente ou em um armazenamento de objetos compatível com S3. + +Para obter informações adicionais sobre os subcomandos de snapshot do etcd, execute `k3s etcd-snapshot --help`. + +Para excluir um snapshot do S3: + +```bash +k3s etcd-snapshot delete \ + --s3 \ + --s3-bucket= \ + --s3-access-key= \ + --s3-secret-key= \ + +``` + +Para podar snapshots locais com a política de retenção padrão (5), use o subcomando `prune`. Ele aceita um sinalizador adicional `--snapshot-retention` que permite substituir a política de retenção padrão. + +```bash +k3s etcd-snapshot prune +``` + +```bash +k3s etcd-snapshot prune --snapshot-retention 10 +``` + +### Recursos Customizados ETCDSnapshotFile + +:::info Nota de Versão +Os recursos ETCDSnapshotFile estão disponíveis a partir das versões de novembro de 2023: v1.28.4+k3s2, v1.27.8+k3s2, v1.26.11+k3s2, v1.25.16+k3s4 +::: + +Os snapshots podem ser visualizados remotamente usando qualquer cliente Kubernetes, listando ou descrevendo recursos `ETCDSnapshotFile` com escopo de cluster. +Ao contrário do comando `k3s etcd-snapshot list`, que mostra apenas snapshots visíveis para aquele nó, os recursos `ETCDSnapshotFile` rastreiam todos os snapshots presentes nos membros do cluster. 
+ +```console +root@k3s-server-1:~# kubectl get etcdsnapshotfile +NAME SNAPSHOTNAME NODE LOCATION SIZE CREATIONTIME +local-on-demand-k3s-server-1-1730308816-3e9290 on-demand-k3s-server-1-1730308816 k3s-server-1 file:///var/lib/rancher/k3s/server/db/snapshots/on-demand-k3s-server-1-1730308816 2891808 2024-10-30T17:20:16Z +s3-on-demand-k3s-server-1-1730308816-79b15c on-demand-k3s-server-1-1730308816 s3 s3://etcd/k3s-test/on-demand-k3s-server-1-1730308816 2891808 2024-10-30T17:20:16Z +``` + +```console +root@k3s-server-1:~# kubectl describe etcdsnapshotfile s3-on-demand-k3s-server-1-1730308816-79b15c +Name: s3-on-demand-k3s-server-1-1730308816-79b15c +Namespace: +Labels: etcd.k3s.cattle.io/snapshot-storage-node=s3 +Annotations: etcd.k3s.cattle.io/snapshot-token-hash: b4b83cda3099 +API Version: k3s.cattle.io/v1 +Kind: ETCDSnapshotFile +Metadata: + Creation Timestamp: 2024-10-30T17:20:16Z + Finalizers: + wrangler.cattle.io/managed-etcd-snapshots-controller + Generation: 1 + Resource Version: 790 + UID: bec9a51c-dbbe-4746-922e-a5136bef53fc +Spec: + Location: s3://etcd/k3s-test/on-demand-k3s-server-1-1730308816 + Node Name: s3 + s3: + Bucket: etcd + Endpoint: s3.example.com + Prefix: k3s-test + Region: us-east-1 + Skip SSL Verify: true + Snapshot Name: on-demand-k3s-server-1-1730308816 +Status: + Creation Time: 2024-10-30T17:20:16Z + Ready To Use: true + Size: 2891808 +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ETCDSnapshotCreated 113s k3s-supervisor Snapshot on-demand-k3s-server-1-1730308816 saved on S3 +``` + + +## Restaurando Snapshots + +O K3s executa várias etapas ao restaurar um snapshot: +1. Se o snapshot estiver armazenado no S3, o arquivo será baixado para o diretório do snapshot. +2. Se o snapshot estiver compactado, ele será descompactado. +3. Se presentes, os arquivos de banco de dados etcd atuais serão movidos para `${data-dir}/server/db/etcd-old-$TIMESTAMP/`. +4. O conteúdo do snapshot é extraído para o disco e a soma de verificação é verificada. +5. O etcd é iniciado e todos os membros do cluster etcd, exceto o nó atual, são removidos do cluster. +6. Os certificados CA e outros dados confidenciais são extraídos do armazenamento de dados e gravados no disco para uso posterior. +7. A restauração é concluída e o K3s pode ser reiniciado e usado normalmente no servidor onde a restauração foi realizada. +8. (opcional) Os agentes e servidores do plano de controle podem ser iniciados normalmente. +8. (opcional) Os servidores Etcd podem ser reiniciados para se juntarem novamente ao cluster após a remoção dos arquivos antigos do banco de dados. + +### Etapas para Restauração do Snapshot + +Selecione a aba abaixo que corresponde à configuração do seu cluster. + + + + +1. Pare o serviço K3s: + ```bash + systemctl stop k3s + ``` + +2. Execute `k3s server` com o sinalizador `--cluster-reset` e `--cluster-reset-restore-path` indicando o caminho para o snapshot a ser restaurado. + Se o snapshot estiver armazenado no S3, forneça sinalizadores de configuração do S3 (`--etcd-s3`, `--etcd-s3-bucket` e assim por diante) e forneça apenas o nome do arquivo do snapshot como o caminho de restauração. + + :::note + Usar o sinalizador `--cluster-reset` sem especificar um snapshot para restaurar simplesmente redefine o cluster etcd para um único membro sem restaurar um snapshot. 
+ ::: + + ```bash + k3s server \ + --cluster-reset \ + --cluster-reset-restore-path= + ``` + + **Resultado:** O K3s restaura o snapshot e redefine a associação ao cluster, então imprime uma mensagem indicando que ele está pronto para ser reiniciado: + `A associação ao cluster etcd gerenciado foi redefinida, reinicie sem o sinalizador --cluster-reset agora.` + +3. Inicie K3s novamente: + ```bash + systemctl start k3s + ``` + + + +Neste exemplo, há 3 servidores, `S1`, `S2` e `S3`. O snapshot está localizado em `S1`. + +1. Para K3s em todos os servidores: + ```bash + systemctl stop k3s + ``` + +2. No S1, execute `k3s server` com a opção `--cluster-reset` e `--cluster-reset-restore-path` indicando o caminho para o snapshot a ser restaurado. + Se o snapshot estiver armazenado no S3, forneça os sinalizadores de configuração do S3 (`--etcd-s3`, `--etcd-s3-bucket` e assim por diante) e forneça apenas o nome do arquivo do snapshot como o caminho de restauração. + + :::note + Usar o sinalizador `--cluster-reset` sem especificar um snapshot para restaurar simplesmente redefine o cluster etcd para um único membro sem restaurar um snapshot. + ::: + + ```bash + k3s server \ + --cluster-reset \ + --cluster-reset-restore-path= + ``` + + **Resultado:** O K3s restaura o snapshot e redefine a associação ao cluster, então imprime uma mensagem indicando que ele está pronto para ser reiniciado: + `A associação ao cluster etcd gerenciado foi redefinida, reinicie sem o sinalizador --cluster-reset agora.` + `Faça backup e exclua ${datadir}/server/db em cada servidor etcd peer e reuna os nós.` + +1. No S1, comece K3s novamente: + ```bash + systemctl start k3s + ``` + +2. No S2 e S3, exclua o diretório de dados, `/var/lib/rancher/k3s/server/db/`: + ```bash + rm -rf /var/lib/rancher/k3s/server/db/ + ``` + +3. Em S2 e S3, inicie o K3s novamente para ingressar no cluster restaurado: + ```bash + systemctl start k3s + ``` + + diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/secrets-encrypt.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/secrets-encrypt.md new file mode 100644 index 000000000..414a1b691 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/secrets-encrypt.md @@ -0,0 +1,336 @@ +--- +title: secrets-encrypt +--- + +# k3s secrets-encrypt + +O K3s suporta habilitar a criptografia de segredos em repouso. Para obter mais informações, consulte [Criptografia de secrets](../security/secrets-encryption.md). + +## Ferramenta de Criptografia Secrets + +:::info Nota de Versão +Disponível a partir de [v1.21.8+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.21.8%2Bk3s1) +::: + +O K3s contém uma ferramenta CLI `secrets-encrypt`, que permite o controle automático sobre o seguinte: + +- Desabilitando/Habilitando a criptografia de secrets +- Adicionando novas secrets de criptografia +- Girando e excluindo secrets de criptografia +- Recriptografando secrets + +:::warning +Não seguir o procedimento adequado para rotacionar chaves de criptografia pode deixar seu cluster permanentemente corrompido. Prossiga com cautela. +::: + +### Nova Rotação de Chave de Criptografia (Experimental) + +:::info Nota de Versão +Disponível a partir de [v1.28.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.1%2Bk3s1). Esta nova versão da ferramenta utilizou o K8s [recarregamento automático de configuração](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#configure-automatic-reloading) que está atualmente em beta. 
O GA é esperado na v1.29.0 + +Para versões mais antigas, consulte [Encryption Key Rotation Classic](#encryption-key-rotation-classic) +::: + + + +Para rotacionar chaves de criptografia de segredos em um cluster de servidor único: + +1. Inicie o servidor K3s com o sinalizador `--secrets-encryption` + + :::note + Atualmente, *não* há suporte para iniciar o K3s sem criptografia e habilitá-lo posteriormente. + ::: + +2. Gire as chaves de criptografia de secrets + ``` + k3s secrets-encrypt rotate-keys + ``` + +3. Aguarde a conclusão da reencriptação. Observe os logs do servidor ou aguarde: + ```bash + $ k3s secrets-encrypt status + Encryption Status: Enabled + Current Rotation Stage: reencrypt_finished + ``` + + + + +Para rotacionar chaves de criptografia secrets em configurações de HA: + + +1. Inicie todos os três servidores K3s com o sinalizador `--secrets-encryption`. Para resumir, os servidores serão chamados de S1, S2, S3. + + :::note + Atualmente, *não* há suporte para iniciar o K3s sem criptografia e habilitá-lo posteriormente. + ::: + +2. Rotaciona as secrets de criptografia no S1 + + ```bash + k3s secrets-encrypt rotate-keys + ``` + +3. Aguarde a conclusão da reencriptação. Observe os logs do servidor ou aguarde: + ```bash + $ k3s secrets-encrypt status + Encryption Status: Enabled + Current Rotation Stage: reencrypt_finished + ``` + :::info + K3s criptografará novamente ~5 secrets por segundo. Clusters com grande número de secrets podem levar vários minutos para criptografar novamente. Você pode acompanhar o progresso nos logs do servidor. + ::: + +4. Reinicie o K3s no S1 com os mesmos argumentos. Se estiver executando o K3s como um serviço: + ```bash + # If using systemd + systemctl restart k3s + # If using openrc + rc-service k3s restart + ``` + +5. Assim que o S1 estiver ativo, reinicie os K3s no S2 e S3 + + + + + +### Rotação de Chave de Criptografia Clássica + + + + +Para rotacionar chaves de criptografia de segredos em um cluster de servidor único: + +1. Inicie o servidor K3s com a flag `--secrets-encryption` + + :::note + Atualmente, *não* há suporte para iniciar o K3s sem criptografia e habilitá-lo posteriormente. + ::: + +2. Prepare + + ```bash + k3s secrets-encrypt prepare + ``` + +3. Mate e reinicie o servidor K3s com os mesmos argumentos. Se estiver executando o K3s como um serviço: + ```bash + # If using systemd + systemctl restart k3s + # If using openrc + rc-service k3s restart + ``` + +4. Rotacione + + ```bash + k3s secrets-encrypt rotate + ``` + +5. Mate e reinicie o servidor K3s com os mesmos argumentos +6. Recriptografar + :::info + K3s criptografará novamente ~5 secrets por segundo. + Clusters com grande número de secrets podem levar vários minutos para criptografar novamente. + ::: + ```bash + k3s secrets-encrypt reencrypt + ``` + + + + + +As etapas são as mesmas para clusters de DB incorporados e DB externos. + +Para rotacionar chaves de criptografia de segredos em configurações de HA: + + + +1. Inicie todos os três servidores K3s com o sinalizador `--secrets-encryption`. Para resumir, os servidores serão chamados de S1, S2, S3. + :::note Nota + - Iniciar o K3s sem criptografia e habilitá-lo posteriormente *não* é suportado atualmente. + - Embora não seja obrigatório, é recomendado que você escolha um nó de servidor do qual executar os comandos `secrets-encrypt`. + ::: + +2. Prepare o S1 + + ```bash + k3s secrets-encrypt prepare + ``` + +3. Mate e reinicie S1 com os mesmos argumentos. 
Se estiver executando K3s como um serviço: + ```bash + # Se estiver usando systemd + systemctl restart k3s + # Se estiver usando openrc + rc-service k3s restart + ``` + +4. Assim que o S1 estiver ativo, mate e reinicie o S2 e o S3 + +5. Rotacione no S1 + + ```bash + k3s secrets-encrypt rotate + ``` + +6. Mate e reinicie o S1 com os mesmos argumentos +7. Assim que o S1 estiver ativo, mate e reinicie o S2 e o S3 + +8. Recriptografar em S1 + :::info + K3s criptografará novamente ~5 secrets por segundo. + Clusters com grande número de secrets podem levar vários minutos para criptografar novamente. + ::: + ```bash + k3s secrets-encrypt reencrypt + ``` + +9. Mate e reinicie o S1 com os mesmos argumentos +10. Assim que o S1 estiver ativo, mate e reinicie o S2 e o S3 + + + + +### Desativar/Reativar Criptografia de Segredos + + + +Após iniciar um servidor com o sinalizador `--secrets-encryption`, a criptografia de segredos pode ser desabilitada. + +Para desabilitar a criptografia de segredos em um cluster de nó único: + +1. Desabilite + + ```bash + k3s secrets-encrypt disable + ``` + +2. Mate e reinicie o servidor K3s com os mesmos argumentos. Se estiver executando o K3s como um serviço: + ```bash + # Se estiver usando systemd + systemctl restart k3s + # Se estiver usando openrc + rc-service k3s restart + ``` + +3. Recriptografar com Flags + + ```bash + k3s secrets-encrypt reencrypt --force --skip + ``` + +Para reativar a criptografia de secrets em um cluster de nó único: + +1. Habilitar + + ```bash + k3s secrets-encrypt enable + ``` + +2. Mate e reinicie o servidor K3s com os mesmos argumentos + +3. Recriptografar com flags + + ```bash + k3s secrets-encrypt reencrypt --force --skip + ``` + + + + +Após iniciar um cluster HA com sinalizadores `--secrets-encryption`, a criptografia de secrets pode ser desabilitada. + +:::note +Embora não seja obrigatório, é recomendável que você escolha um nó de servidor para executar os comandos `secrets-encrypt`. +::: + +Para resumir, os três servidores usados ​​neste guia serão chamados de S1, S2, S3. + +Para desabilitar a criptografia de secrets em um cluster HA: + +1. Desabilitar em S1 + + ```bash + k3s secrets-encrypt disable + ``` + +2. Mate e reinicie S1 com os mesmos argumentos. Se estiver executando K3s como um serviço: + ```bash + # Se estiver usando systemd + systemctl restart k3s + # If using openrc + rc-service k3s restart + ``` + +3. Assim que o S1 estiver ativo, mate e reinicie o S2 e o S3 + + +4. Recriptografar com flags em S1 + + ```bash + k3s secrets-encrypt reencrypt --force --skip + ``` + +Para reativar a criptografia de secrets em um cluster HA: + +1. Habilitar em S1 + + ```bash + k3s secrets-encrypt enable + ``` + +2. Mate e reinicie o S1 com os mesmos argumentos +3. Assim que o S1 estiver ativo, mate e reinicie o S2 e o S3 + +4. Recriptografar com flags em S1 + + ```bash + k3s secrets-encrypt reencrypt --force --skip + ``` + + + + +### Status de Criptografia Secrets +A ferramenta secrets-encrypt inclui um comando `status` que exibe informações sobre o status atual da criptografia de secrets no nó. 
+ +Um exemplo do comando em um nó de servidor único: +```bash +$ k3s secrets-encrypt status +Encryption Status: Enabled +Current Rotation Stage: start +Server Encryption Hashes: All hashes match + +Active Key Type Name +------ -------- ---- + * AES-CBC aescbckey + +``` + +Outro exemplo no cluster HA, após rotacionar as chaves, mas antes de reiniciar os servidores: +```bash +$ k3s secrets-encrypt status +Encryption Status: Enabled +Current Rotation Stage: rotate +Server Encryption Hashes: hash does not match between node-1 and node-2 + +Active Key Type Name +------ -------- ---- + * AES-CBC aescbckey-2021-12-10T22:54:38Z + AES-CBC aescbckey + +``` + +Os detalhes de cada seção são os seguintes: + +- __Encryption Status__: Exibido se a criptografia de segredos está desabilitada ou habilitada no nó +- __Current Rotation Stage__: Indica o estágio de rotação atual no nó. + Os estágios são: `start`, `prepare`, `rotate`, `reencrypt_request`, `reencrypt_active`, `reencrypt_finished` +- __Server Encryption Hashes__: Útil para clusters HA, isso indica se todos os servidores estão no mesmo estágio com seus arquivos locais. Isso pode ser usado para identificar se uma reinicialização dos servidores é necessária antes de prosseguir para o próximo estágio. No exemplo de HA acima, node-1 e node-2 têm hashes diferentes, indicando que eles atualmente não têm a mesma configuração de criptografia. Reiniciar os servidores sincronizará suas configurações. +- __Key Table__: Resume informações sobre as chaves de criptografia secretas encontradas no nó. + * __Active__: O "*" indica quais, se houver, das chaves são usadas atualmente para criptografia de segredos. Uma chave ativa é usada pelo Kubernetes para criptografar quaisquer novos segredos. + * __Key Type__: Todas as chaves que usam esta ferramenta são do tipo `AES-CBC`. Veja mais informações [aqui.](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#providers) + * __Name__: Nome da chave de criptografia. diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/server.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/server.md new file mode 100644 index 000000000..3ecb1107d --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/server.md @@ -0,0 +1,314 @@ +--- +title: servidor +--- + +# K3s Servidor + +Nesta seção, você aprenderá como configurar o servidor K3s. + +Observe que os servidores também executam um agente, portanto, todas as opções de configuração listadas na [documentação do `k3s agent`](agent.md) também são suportadas nos servidores. + +As opções são documentadas nesta página como sinalizadores CLI, mas também podem ser passadas como opções de arquivo de configuração. Veja a documentação do [Arquivo de configuração](../installation/configuration.md#configuration-file) para mais informações sobre o uso de arquivos de configuração YAML. + +## Valores Críticos de Configuração + +As seguintes opções devem ser definidas com o mesmo valor em todos os servidores no cluster. Não fazer isso fará com que novos servidores não consigam ingressar no cluster ao usar etcd incorporado, ou operação incorreta do cluster ao usar um datastore externo. 
+ +* `--agent-token` +* `--cluster-cidr` +* `--cluster-dns` +* `--cluster-domain` +* `--disable-cloud-controller` +* `--disable-helm-controller` +* `--disable-network-policy` +* `--disable=servicelb` *nota: outros componentes empacotados podem ser desabilitados por servidor* +* `--egress-selector-mode` +* `--embedded-registry` +* `--flannel-backend` +* `--flannel-external-ip` +* `--flannel-ipv6-masq` +* `--secrets-encryption` +* `--service-cidr` + +## Opções Comumente Usadas + +### Banco de Dados + +| Flag | Variável de Ambiente | Valor Padrão | Descrição | +| ------------------------------------- | ------------------------ | -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `--datastore-endpoint` value | `K3S_DATASTORE_ENDPOINT` | | Especifique o nome da fonte de dados etcd, NATS, MySQL, Postgres ou SQLite | +| `--datastore-cafile` value | `K3S_DATASTORE_CAFILE` | | Arquivo de Autoridade de Certificação TLS usado para proteger a comunicação de backend do armazenamento de dados | +| `--datastore-certfile` value | `K3S_DATASTORE_CERTFILE` | | Arquivo de certificação TLS usado para proteger a comunicação de backend do datastore | +| `--datastore-keyfile` value | `K3S_DATASTORE_KEYFILE` | | Arquivo de chave TLS usado para proteger a comunicação de backend do datastore | +| `--etcd-expose-metrics` | | false | Exponha métricas etcd à interface do cliente | +| `--etcd-disable-snapshots` | | false | Desabilitar snapshots automáticos do etcd | +| `--etcd-snapshot-name` value | | "etcd-snapshot-<unix-timestamp>" | Defina o nome base dos instantâneos do etcd. | +| `--etcd-snapshot-schedule-cron` value | | "0 */12 \* \* \*" | Tempo de intervalo de instantâneo na especificação cron, por exemplo, a cada 5 horas '0 */5 _ \* _' | +| `--etcd-snapshot-retention` value | | 5 | Número de instantâneos a reter | +| `--etcd-snapshot-dir` value | | $\{data-dir\}/db/snapshots | Diretório para salvar snapshots do banco de dados | +| `--etcd-s3` | | | Habilitar backup para S3 | +| `--etcd-s3-endpoint` value | | "s3.amazonaws.com" | Endpoint URL S3 | +| `--etcd-s3-endpoint-ca` value | | | Certificado CA personalizado S3 para conectar ao endpoint S3 | +| `--etcd-s3-skip-ssl-verify` | | | Desabilita a validação do certificado SSL S3 | +| `--etcd-s3-access-key` value | `AWS_ACCESS_KEY_ID` | | Chave de acesso S3 | +| `--etcd-s3-secret-key` value | `AWS_SECRET_ACCESS_KEY` | | Chave secreta S3 | +| `--etcd-s3-bucket` value | | | Nome do bucket S3 | +| `--etcd-s3-region` value | | "us-east-1" | Região S3 / localização do bucket (opcional) | +| `--etcd-s3-folder` value | | | Pasta S3 | +| `--etcd-s3-proxy` | | | Servidor proxy a ser usado ao conectar ao S3, substituindo quaisquer variáveis ​​de ambiente relacionadas ao proxy | +| `--etcd-s3-config-secret` | | | Nome do segredo no namespace kube-system usado para configurar o S3, se o etcd-s3 estiver habilitado e nenhuma outra opção do etcd-s3 estiver definida | +| `--etcd-s3-insecure` | | | Desabilita S3 sobre HTTPS | +| `--etcd-s3-timeout` value | | 5m0s | Tempo limite S3 (padrão: 5m0s) | + + + + +### Opções de Cluster + +| Flag | Variável de Ambiente | Descrição | +| --------------------------- | ---------------------- | ---------------------------------------------------------------------------- | +| `--token` value, `-t` value | `K3S_TOKEN` | Segredo compartilhado usado para unir um servidor ou agente a um cluster | +| 
`--token-file` value | `K3S_TOKEN_FILE` | Arquivo contendo o cluster-secret/token | +| `--agent-token` value | `K3S_AGENT_TOKEN` | Segredo compartilhado usado para unir agentes ao cluster, mas não servidores | +| `--agent-token-file` value | `K3S_AGENT_TOKEN_FILE` | Arquivo contendo o segredo do agente | +| `--server` value | `K3S_URL` | Servidor ao qual se conectar, usado para unir um cluster | +| `--cluster-init` | `K3S_CLUSTER_INIT` | Inicializar um novo cluster usando o Etcd incorporado | +| `--cluster-reset` | `K3S_CLUSTER_RESET` | Esqueça todos os pares e torne-se o único membro de um novo cluster | + + +### Opções do Kubeconfig Admin + +| Flag | Variável de Ambiente | Descrição | +| ------------------------------------ | ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `--write-kubeconfig value, -o` value | `K3S_KUBECONFIG_OUTPUT` | Grava o kubeconfig para o cliente administrador neste arquivo | +| `--write-kubeconfig-mode` value | `K3S_KUBECONFIG_MODE` | Grava o kubeconfig com este [modo.](https://en.wikipedia.org/wiki/Chmod) O arquivo kubeconfig é de propriedade do root e é escrito com um modo padrão de 600. Alterar o modo para 644 permitirá que ele seja lido por outros usuários sem privilégios no host. | +| `--write-kubeconfig-group` value | `K3S_KUBECONFIG_GROUP` | Grava o grupo kubeconfig. Combinado com `--write-kubeconfig-mode`, permitirá que seus administradores do k3s acessem o arquivo kubeconfig, mas mantendo o arquivo de propriedade do root. | + +## Opções Avançadas + +### Logging + +| Flag | Valor Padrão | Descrição | +| ----------------------- | ------------ | ------------------------------------------------------------------------------------------------- | +| `--debug` | N/A | Ativar logs de depuração | +| `-v` value | 0 | Número para o detalhamento do nível de log | +| `--vmodule` value | N/A | Lista separada por vírgulas de configurações FILE_PATTERN=LOG_LEVEL para log filtrado por arquivo | +| `--log value, -l` value | N/A | Logar no arquivo | +| `--alsologtostderr` | N/A | Logar no erro padrão e também no arquivo (se definido) | + +### Listeners + +| Flag | Valor Padrão | Descrição | +| --------------------------- | ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `--bind-address` value | 0.0.0.0 | Endereço de bind k3s | +| `--https-listen-port` value | 6443 | Porta de escuta HTTPS | +| `--advertise-address` value | node-external-ip/node-ip | Endereço IPv4/IPv6 que o apiserver anuncia para seu ponto de extremidade de serviço
Observe que o intervalo IP primário `service-cidr` deve ser da mesma família de endereços que o endereço anunciado | +| `--advertise-port` value | listen-port/0 | Porta que o apiserver usa para anunciar aos membros do cluster | +| `--tls-san` value | N/A | Adicione nomes de host adicionais ou endereços IPv4/IPv6 como Nomes Alternativos de Assunto no certificado TLS | +| `--tls-san-security` | true | Proteja o certificado TLS do servidor recusando-se a adicionar nomes alternativos de assunto não associados ao serviço apiserver do kubernetes, nós do servidor ou valores da opção tls-san | + + +### Data + +| Flag | Valor Padrão | Descrição | +| ---------------------------- | ------------------------------------------------------------ | --------------------------- | +| `--data-dir value, -d` value | `/var/lib/rancher/k3s` or `${HOME}/.rancher/k3s` if not root | Pasta para armazenar estado | + +### Criptografia de Secrets + +| Flag | Valor Padrão | Descrição | +| ---------------------- | ------------ | ----------------------------------------- | +| `--secrets-encryption` | false | Habilitar criptografia secreta em repouso | + + +### Rede + +| Flag | Valor Padrão | Descrição | +| --------------------------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `--cluster-cidr` value | "10.42.0.0/16" | CIDRs de rede IPv4/IPv6 para usar no pod IPs | +| `--service-cidr` value | "10.43.0.0/16" | CIDRs de rede IPv4/IPv6 para uso em serviço | +| `--service-node-port-range` value | "30000-32767" | Intervalo de portas a ser reservado para serviços com visibilidade NodePort | +| `--cluster-dns` value | "10.43.0.10" | IP do cluster IPv4 para o serviço coredns. Deve estar no seu intervalo service-cidr | +| `--cluster-domain` value | "cluster.local" | Cluster Domain | +| `--flannel-backend` value | "vxlan" | Um de 'none', 'vxlan', 'ipsec' (obsoleto), 'host-gw', 'wireguard-native' ou 'wireguard' (obsoleto) | +| `--flannel-ipv6-masq` | "N/A" | Habilitar mascaramento IPv6 para pod | +| `--flannel-external-ip` | "N/A" | Use endereços IP externos do nó para tráfego Flannel | +| `--servicelb-namespace` value | "kube-system" | Namespace dos pods para o componente servicelb | +| `--egress-selector-mode` value | "agent" | Deve ser um dos seguintes:
  • disabled: O apiserver não usa túneis de agente para se comunicar com nós. Requer que os servidores executem agentes e tenham conectividade direta com o kubelet em agentes, ou o apiserver não poderá acessar os pontos de extremidade do serviço ou executar kubectl exec e kubectl logs.<br/>
  • agent: O apiserver usa túneis de agente para se comunicar com nós. Os nós permitem a conexão do túnel a partir de endereços de loopback. Requer que os servidores também executem agentes, ou o apiserver não poderá acessar os pontos de extremidade do serviço. O padrão histórico para k3s.<br/>
  • pod: O apiserver usa túneis de agente para se comunicar com nós e pontos de extremidade do serviço, roteando conexões de ponto de extremidade para o agente correto observando os nós. Os nós permitem a conexão do túnel de endereços de loopback ou um CIDR atribuído ao seu nó.
  • cluster: O apiserver usa túneis de agente para se comunicar com nós e pontos de extremidade do serviço, roteando conexões de ponto de extremidade para o agente correto observando os pontos de extremidade. Os nós permitem a conexão de túnel a partir de endereços de loopback ou do intervalo CIDR do cluster configurado.
| + + +### Classe de Armazenamento + +| Flag | Descrição | +| ------------------------------------ | ----------------------------------------------------------------------------------------- | +| `--default-local-storage-path` value | Caminho de armazenamento local padrão para classe de armazenamento do provisionador local | + +### Componentes do Kubernetes + +| Flag | Descrição | +| ---------------------------- | -------------------------------------------------------------------------------------------------------- | +| `--disable` value | Consulte "[Using the `--disable` flag](../installation/packaged-components.md#using-the---disable-flag)" | +| `--disable-scheduler` | Desabilitar o agendador padrão do Kubernetes | +| `--disable-cloud-controller` | Desabilitar o gerenciador de controlador de nuvem padrão do k3s | +| `--disable-kube-proxy` | Desabilitar execução do kube-proxy | +| `--disable-network-policy` | Desabilitar controlador de política de rede padrão k3s | +| `--disable-helm-controller` | Desabilitar controlador Helm | + + +### Flags Customizada para Processos do Kubernetes + +| Flag | Descrição | +| ------------------------------------------- | ----------------------------------------------------------------------- | +| `--etcd-arg` value | Bandeira personalizada para processo etcd | +| `--kube-apiserver-arg` value | Sinalizador personalizado para o processo kube-apiserver | +| `--kube-scheduler-arg` value | Bandeira personalizada para o processo kube-scheduler | +| `--kube-controller-manager-arg` value | Bandeira personalizada para o processo kube-controller-manager | +| `--kube-cloud-controller-manager-arg` value | Sinalizador personalizado para o processo kube-cloud-controller-manager | +| `--kubelet-arg` value | Bandeira personalizada para o processo kubelet | +| `--kube-proxy-arg` value | Bandeira personalizada para o processo kube-proxy | + +### Experimental Options + +| Flag | Descrição | +| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `--rootless` | Modo rootless | +| `--enable-pprof` | Habilitar ponto de extremidade pprof na porta do supervisor | +| `--docker` | Use cri-dockerd em vez de containerd | +| `--prefer-bundled-bin` | Prefira binários de espaço de usuário agrupados em vez de binários de host | +| `--disable-agent` | Consulte "[Executando Servidores Sem Agente](../advanced.md#running-agentless-servers-experimental)" | +| `--embedded-registry` | Consulte "[Espelho do Registro Incorporado](../installation/registry-mirror.md)" | +| `--vpn-auth` | Consulte "[Integração com o provedor Tailscale VPN](../networking/distributed-multicloud.md#integration-with-the-tailscale-vpn-provider-experimental)" | +| `--vpn-auth-file` | Consulte "[Integração com o provedor Tailscale VPN](../networking/distributed-multicloud.md#integration-with-the-tailscale-vpn-provider-experimental)" | + +### Opções Depreciadas + +| Flag | Variável de Ambiente | Descrição | +| --------------------------------------- | -------------------- | -------------------------------------------------------------------------------------------------------- | +| `--no-flannel` | N/A | Use `--flannel-backend=none` | +| `--no-deploy` value | N/A | Use `--disable` | +| `--cluster-secret` value | `K3S_CLUSTER_SECRET` | Use `--token` | +| `--flannel-backend` wireguard | N/A | Use `--flannel-backend=wireguard-native` | +| `--flannel-backend` value=option1=value | N/A | Use 
`--flannel-conf` para especificar o arquivo de configuração do flannel com a configuração do backend | + + +## Ajuda do K3s Server CLI + +> Se uma opção aparecer entre colchetes abaixo, por exemplo `[$K3S_TOKEN]`, significa que a opção pode ser passada como uma variável de ambiente com esse nome. + +```bash +NAME: + k3s server - Run management server + +USAGE: + k3s server [OPTIONS] + +OPTIONS: + --config FILE, -c FILE (config) Carregar configuração do ARQUIVO (padrão: "/etc/rancher/k3s/config.yaml") [$K3S_CONFIG_FILE] + --debug (logging) Ativar logs de depuração [$K3S_DEBUG] + -v value (logging) Número para a verbosidade do nível de log (padrão: 0) + --vmodule value (logging) Lista separada por vírgulas de configurações FILE_PATTERN=LOG_LEVEL para registro filtrado por arquivo + --log value, -l value (logging) Arquivo Log + --alsologtostderr (logging) Registre no erro padrão e também no arquivo (se definido) + --bind-address value (listener) Endereço de ligação k3s (padrão: 0.0.0.0) + --https-listen-port value (listener) Porta de escuta HTTPS (padrão: 6443) + --advertise-address value (listener) Endereço IPv4/IPv6 que o apiserver usa para anunciar aos membros do cluster (padrão: node-external-ip/node-ip) + --advertise-port value (listener) Porta que o apiserver usa para anunciar aos membros do cluster (padrão: listen-port) (padrão: 0) + --tls-san value (listener) Adicione nomes de host adicionais ou endereços IPv4/IPv6 como nomes alternativos de assunto no certificado TLS do servidor + --tls-san-security (listener) Proteja o certificado TLS do servidor recusando-se a adicionar nomes alternativos de assunto não associados ao serviço apiserver do kubernetes, nós do servidor ou valores da opção tls-san (padrão: true) + --data-dir value, -d value (data) Pasta para armazenar o estado padrão /var/lib/rancher/k3s ou ${HOME}/.rancher/k3s se não for root [$K3S_DATA_DIR] + --cluster-cidr value (networking) CIDRs de rede IPv4/IPv6 a serem usados ​​para IPs de pod (padrão: 10.42.0.0/16) + --service-cidr value (networking) CIDRs de rede IPv4/IPv6 a serem usados ​​para IPs de serviço (padrão: 10.43.0.0/16) + --service-node-port-range value (networking) Intervalo de portas a ser reservado para serviços com visibilidade NodePort (padrão: "30000-32767") + --cluster-dns value (networking) IP do cluster IPv4 para o serviço coredns. 
Deve estar no seu intervalo service-cidr (padrão: 10.43.0.10) + --cluster-domain value (networking) Domínio do Cluster (padrão: "cluster.local") + --flannel-backend value (networking) Backend (valores válidos: 'none', 'vxlan', 'host-gw', 'wireguard-native' (padrão: "vxlan") + --flannel-ipv6-masq (networking) Habilitar mascaramento IPv6 para pod + --flannel-external-ip (networking) Use endereços IP externos do nó para tráfego Flannel + --egress-selector-mode value (networking) Um de 'agente', 'cluster', 'pod', 'desabilitado' (padrão: "agente") + --servicelb-namespace value (networking) Namespace dos pods para o componente servicelb (padrão: "kube-system") + --write-kubeconfig value, -o value (client) Escreva o kubeconfig para o cliente administrador neste arquivo [$K3S_KUBECONFIG_OUTPUT] + --write-kubeconfig-mode value (client) Escreva kubeconfig com este modo [$K3S_KUBECONFIG_MODE] + --write-kubeconfig-group value (client) Escreva kubeconfig com este grupo [$K3S_KUBECONFIG_GROUP] + --helm-job-image value (helm) Imagem padrão a ser usada para trabalhos de helm + --token value, -t value (cluster) Segredo compartilhado usado para unir um servidor ou agente a um cluster [$K3S_TOKEN] + --token-file value (cluster) Arquivo contendo o token [$K3S_TOKEN_FILE] + --agent-token value (cluster) Segredo compartilhado usado para unir agentes ao cluster, mas não servidores [$K3S_AGENT_TOKEN] + --agent-token-file value (cluster) Arquivo contendo o segredo do agente [$K3S_AGENT_TOKEN_FILE] + --server value, -s value (cluster) Servidor para conectar, usado para ingressar em um cluster [$K3S_URL] + --cluster-init (cluster) Inicializar um novo cluster usando Etcd incorporado [$K3S_CLUSTER_INIT] + --cluster-reset (cluster) Esqueça todos os pares e torne-se o único membro de um novo cluster [$K3S_CLUSTER_RESET] + --cluster-reset-restore-path value (db) Caminho para o arquivo de instantâneo a ser restaurado + --kube-apiserver-arg value (flags) Sinalizador personalizado para o processo kube-apiserver + --etcd-arg value (flags) Bandeira personalizada para processo etcd + --kube-controller-manager-arg value (flags) Bandeira personalizada para o processo kube-controller-manager + --kube-scheduler-arg value (flags) Bandeira personalizada para o processo kube-scheduler + --kube-cloud-controller-manager-arg value (flags) Sinalizador personalizado para o processo kube-cloud-controller-manager + --datastore-endpoint value (db) Especifique o nome da fonte de dados etcd, NATS, MySQL, Postgres ou SQLite (padrão) [$K3S_DATASTORE_ENDPOINT] + --datastore-cafile value (db) Arquivo de Autoridade de Certificação TLS usado para proteger a comunicação de backend do datastore [$K3S_DATASTORE_CAFILE] + --datastore-certfile value (db) Arquivo de certificado TLS usado para proteger a comunicação de backend do datastore [$K3S_DATASTORE_CERTFILE] + --datastore-keyfile value (db) Arquivo de chave TLS usado para proteger a comunicação de backend do datastore [$K3S_DATASTORE_KEYFILE] + --etcd-expose-metrics (db) Exponha métricas etcd à interface do cliente. 
(padrão: false)
+   --etcd-disable-snapshots (db) Desabilitar snapshots automáticos do etcd
+   --etcd-snapshot-name value (db) Defina o nome base dos instantâneos do etcd (padrão: etcd-snapshot-<unix-timestamp>) (padrão: "etcd-snapshot")
+   --etcd-snapshot-schedule-cron value (db) Intervalo de execução do cron de snapshots, por exemplo, a cada 5 horas '0 */5 * * *' (padrão: "0 */12 * * *")
+   --etcd-snapshot-retention value (db) Número de instantâneos a reter (padrão: 5)
+   --etcd-snapshot-dir value (db) Diretório para salvar instantâneos do banco de dados. (padrão: ${data-dir}/db/snapshots)
+   --etcd-snapshot-compress (db) Compactar o snapshot do etcd
+   --etcd-s3 (db) Habilitar backup para S3
+   --etcd-s3-endpoint value (db) URL do endpoint S3 (padrão: "s3.amazonaws.com")
+   --etcd-s3-endpoint-ca value (db) Certificado CA customizado para conectar ao endpoint S3
+   --etcd-s3-skip-ssl-verify (db) Desabilita a validação do certificado SSL S3
+   --etcd-s3-access-key value (db) Chave de acesso S3 [$AWS_ACCESS_KEY_ID]
+   --etcd-s3-secret-key value (db) Chave secreta S3 [$AWS_SECRET_ACCESS_KEY]
+   --etcd-s3-bucket value (db) Nome do bucket S3
+   --etcd-s3-region value (db) Região S3 / localização do bucket (opcional) (padrão: "us-east-1")
+   --etcd-s3-folder value (db) Pasta S3
+   --etcd-s3-proxy value (db) Servidor proxy a ser usado ao conectar ao S3, substituindo quaisquer variáveis de ambiente relacionadas a proxy
+   --etcd-s3-config-secret value (db) Nome do secret no namespace kube-system usado para configurar o S3, se o etcd-s3 estiver habilitado e nenhuma outra opção do etcd-s3 estiver definida
+   --etcd-s3-insecure (db) Desabilita S3 sobre HTTPS
+   --etcd-s3-timeout value (db) Tempo limite S3 (padrão: 5m0s)
+   --default-local-storage-path value (storage) Caminho de armazenamento local padrão para a classe de armazenamento do provisionador local
+   --disable value (components) Não implante componentes empacotados e exclua quaisquer componentes implantados (itens válidos: coredns, servicelb, traefik, local-storage, metrics-server, runtimes)
+   --disable-scheduler (components) Desabilitar o agendador padrão do Kubernetes
+   --disable-cloud-controller (components) Desabilitar o gerenciador de controlador de nuvem padrão do k3s
+   --disable-kube-proxy (components) Desabilitar a execução do kube-proxy
+   --disable-network-policy (components) Desabilitar o controlador de política de rede padrão do k3s
+   --disable-helm-controller (components) Desabilitar o controlador Helm
+   --embedded-registry (experimental/components) Habilitar o registro de contêiner distribuído incorporado; requer o uso do containerd incorporado; quando habilitado, os agentes também escutarão na porta do supervisor
+   --supervisor-metrics (experimental/components) Habilitar o serviço de métricas internas do k3s na porta do supervisor; quando habilitado, os agentes também escutarão na porta do supervisor
+   --node-name value (agent/node) Nome do nó [$K3S_NODE_NAME]
+   --with-node-id (agent/node) Adicionar id ao nome do nó
+   --node-label value (agent/node) Registrar e iniciar o kubelet com o conjunto de rótulos
+   --node-taint value (agent/node) Registrar o kubelet com o conjunto de taints
+   --image-credential-provider-bin-dir value (agent/node) O caminho para o diretório onde os binários do plugin do provedor de credenciais estão localizados (padrão: "/var/lib/rancher/credentialprovider/bin")
+   --image-credential-provider-config value (agent/node) O caminho para o arquivo de configuração do plugin do provedor de credenciais (padrão:
"/var/lib/rancher/credentialprovider/config.yaml") + --docker (agent/runtime) (experimental) Use cri-dockerd em vez de containerd + --container-runtime-endpoint value (agent/runtime) Desabilite o containerd incorporado e use o soquete CRI no caminho fornecido; quando usado com --docker, isso define o caminho do soquete do docker + --default-runtime value (agent/runtime) Defina o tempo de execução padrão no containerd + --image-service-endpoint value (agent/runtime) Desabilite o serviço de imagem containerd incorporado e use o socket de serviço de imagem remoto no caminho fornecido. Se não for especificado, o padrão é --container-runtime-endpoint. + --disable-default-registry-endpoint (agent/containerd) Desabilita o ponto de extremidade de registro padrão de fallback do containerd quando um espelho é configurado para esse registro + --nonroot-devices (agent/containerd) Permite que pods não root acessem dispositivos definindo device_ownership_from_security_context=true na configuração CRI do containerd + --pause-image value (agent/runtime) Imagem de pausa personalizada para containerd ou docker sandbox (padrão: "rancher/mirrored-pause:3.6") + --snapshotter value (agent/runtime) Substituir snapshotter padrão do containerd (padrão: "overlayfs") + --private-registry value (agent/runtime) Arquivo de configuração de registro privado (padrão: "/etc/rancher/k3s/registries.yaml") + --system-default-registry value (agent/runtime) Registro privado a ser usado para todas as imagens do sistema [$K3S_SYSTEM_DEFAULT_REGISTRY] + --node-ip value, -i value (agent/networking) Endereços IPv4/IPv6 para anunciar para o nó + --node-external-ip value (agent/networking) Endereços IP externos IPv4/IPv6 para anunciar para o nó + --node-internal-dns value (agent/networking) endereços DNS internos para anunciar o nó + --node-external-dns value (agent/networking) endereços DNS externos para anunciar o nó + --resolv-conf value (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF] + --flannel-iface value (agent/networking) Substituir interface de flanela padrão + --flannel-conf value (agent/networking) Substituir arquivo de configuração flannel padrão + --flannel-cni-conf value (agent/networking) Substituir arquivo de configuração cni flannel padrão + --vpn-auth value (agent/networking) (experimental) Credenciais para o provedor VPN. Deve incluir o nome do provedor e a chave de junção no formato name=,joinKey=[,controlServerURL=][,extraArgs=] [$K3S_VPN_AUTH] + --vpn-auth-file value (agent/networking) (experimental) Arquivo contendo credenciais para o provedor VPN. Ele deve incluir o nome do provedor e a chave de junção no formato name=,joinKey=[,controlServerURL=][,extraArgs=] [$K3S_VPN_AUTH_FILE] + --kubelet-arg value (agent/flags) Bandeira personalizada para o processo kubelet + --kube-proxy-arg value (agent/flags) Bandeira personalizada para o processo kube-proxy + --protect-kernel-defaults (agent/node) Comportamento de ajuste do kernel. Se definido, erro se os ajustes do kernel forem diferentes dos padrões do kubelet. + --secrets-encryption Habilitar criptografia secreta em repouso + --enable-pprof (experimental) Habilitar ponto de extremidade pprof na porta do supervisor + --rootless (experimental) Modo rootless + --prefer-bundled-bin (experimental) Prefira binários de espaço de usuário agrupados em vez de binários de host + --selinux (agent/node) Habilitar SELinux no containerd [$K3S_SELINUX] + --lb-server-port value (agent/node) Porta local para o balanceador de carga do cliente supervisor. 
Se o supervisor e o apiserver não estiverem colocalizados, uma porta adicional (esta porta menos 1) também será usada para o balanceador de carga do cliente apiserver. (padrão: 6444) [$K3S_LB_SERVER_PORT]
+```
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/token.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/token.md
new file mode 100644
index 000000000..c6b9e4046
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cli/token.md
@@ -0,0 +1,162 @@
+---
+title: token
+---
+
+# k3s token
+
+O K3s usa tokens para proteger o processo de junção de nós e criptografar informações confidenciais que são persistidas no datastore. Os tokens autenticam o cluster para o nó de junção e o nó para o cluster.
+
+## Formato de Token
+
+Os tokens K3s podem ser especificados em formato seguro ou curto. O formato seguro é o preferido, pois permite que o cliente autentique a identidade do cluster ao qual está se unindo antes de enviar credenciais.
+
+### Seguro
+
+O formato de token seguro (ocasionalmente chamado de token "completo") contém as seguintes partes:
+
+`<prefix><hash CA do cluster>::<credentials>`
+
+* `prefix`: um prefixo `K10` fixo que identifica o formato do token
+* `hash CA do cluster`: o hash do certificado CA do servidor do cluster, usado para autenticar o servidor no nó de junção.
+  * Para certificados CA autoassinados, esta é a soma SHA256 do certificado formatado em PEM, conforme armazenado no disco.
+  * Para certificados CA personalizados, esta é a soma SHA256 da codificação DER do certificado raiz, comumente conhecida como impressão digital do certificado.
+* `credentials`: o nome de usuário e a senha, ou o token portador, usados para autenticar o nó de junção no cluster.
+
+#### Inicialização TLS
+
+Quando um token seguro é especificado, o nó de junção executa as seguintes etapas para validar a identidade do servidor ao qual ele se conectou, antes de transmitir credenciais:
+1. Com a verificação TLS desabilitada, baixe o pacote CA de `/cacerts` no servidor ao qual ele está se juntando.
+2. Calcule o hash SHA256 do certificado CA, conforme descrito acima.
+3. Compare o hash SHA256 calculado com o hash do token.
+4. Se o hash corresponder, verifique se o certificado apresentado pelo servidor pode ser validado pelo pacote CA do servidor.
+5. Se o certificado do servidor for válido, apresente as credenciais para se juntar ao cluster usando autenticação de token básica ou portadora, dependendo do tipo de token.
+
+### Curto
+
+O formato de token curto inclui apenas a senha ou o token portador usado para autenticar o nó de junção ao cluster.
+
+Se um token curto for usado, o nó de junção confia implicitamente no pacote CA apresentado pelo servidor; as etapas 2 a 4 do processo de Inicialização TLS são ignoradas. A conexão inicial pode ser vulnerável a um ataque [man-in-the-middle](https://en.wikipedia.org/wiki/Man-in-the-middle_attack).
+
+## Tipos de Token
+
+O K3s suporta três tipos de tokens. Apenas o token do servidor está disponível por padrão; os tipos de token adicionais devem ser configurados ou criados pelo administrador.
+
+| Tipo      | Opção CLI       | Variável de ambiente |
+| --------- | --------------- | -------------------- |
+| Servidor  | `--token`       | `K3S_TOKEN`          |
+| Agente    | `--agent-token` | `K3S_AGENT_TOKEN`    |
+| Bootstrap | `n/a`           | `n/a`                |
+
+### Servidor
+
+Se nenhum token for fornecido ao iniciar o primeiro servidor no cluster, um será criado com uma senha aleatória.
+O token do servidor é sempre gravado em `/var/lib/rancher/k3s/server/token`, em formato seguro.
+
+O token do servidor pode ser usado para unir nós de servidor e de agente ao cluster. Qualquer pessoa com acesso ao token do servidor tem, essencialmente, acesso de administrador total ao cluster. Este token deve ser guardado cuidadosamente.
+
+O token do servidor também é usado como a frase-senha [PBKDF2](https://en.wikipedia.org/wiki/PBKDF2) para criptografar informações confidenciais que são persistidas no armazenamento de dados, conhecidas como dados de bootstrap. Os dados de bootstrap são essenciais para configurar novos nós de servidor ou restaurar de um snapshot. Por esse motivo, o token deve ser copiado junto com o próprio armazenamento de dados do cluster.
+
+:::warning
+A menos que certificados CA personalizados estejam em uso, somente o formato de token curto (somente senha) pode ser usado ao iniciar o primeiro servidor no cluster. Isso ocorre porque o hash da CA do cluster não pode ser conhecido até que o servidor tenha gerado os certificados CA autoassinados do cluster.
+:::
+
+Para obter mais informações sobre o uso de certificados CA personalizados, consulte a [documentação do comando `k3s certificate`](./certificate.md).
+Para obter mais informações sobre como fazer backup do seu cluster, consulte a documentação de [Backup e Restauração](../datastore/backup-restore.md).
+
+### Agente
+
+Por padrão, o token do agente é o mesmo que o token do servidor. O token do agente pode ser definido antes ou depois de o cluster ter sido iniciado, alterando a opção CLI ou a variável de ambiente em todos os servidores do cluster. O token do agente é semelhante ao token do servidor: é configurado estaticamente e não expira.
+
+O token do agente é gravado em `/var/lib/rancher/k3s/server/agent-token`, em formato seguro. Se nenhum token de agente for especificado, este arquivo é um link para o token do servidor.
+
+### Bootstrap
+
+O K3s suporta [tokens de bootstrap](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) de agente gerados dinamicamente e com expiração automática.
+
+## k3s token
+
+Os tokens de bootstrap do K3s usam o mesmo código de geração e validação que os tokens de bootstrap do `kubeadm token`, e a CLI `k3s token` é semelhante.
+
+```
+NAME:
+   k3s token - Manage bootstrap tokens
+
+USAGE:
+   k3s token command [command options] [arguments...]
+
+COMMANDS:
+   create   Create bootstrap tokens on the server
+   delete   Delete bootstrap tokens on the server
+   generate Generate and print a bootstrap token, but do not create it on the server
+   list     List bootstrap tokens on the server
+   rotate   Rotate original server token with a new bootstrap token
+
+OPTIONS:
+   --help, -h  show help
+```
+
+#### `k3s token create [token]`
+
+Crie um novo token. O `[token]` é o token real a ser gravado, conforme gerado por `k3s token generate`. Se nenhum token for fornecido, um aleatório será gerado.
+
+Um token em formato seguro, incluindo o hash da CA do cluster, será gravado no stdout. A saída desse comando deve ser salva, pois a parte secreta do token não pode ser mostrada novamente.
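+
+Um esboço de uso (o token de saída abaixo é meramente ilustrativo; as flags estão descritas na tabela a seguir):
+
+```bash
+# Cria um token com uma descrição e validade de 12 horas
+sudo k3s token create --description "token para novos agentes" --ttl 12h
+# Saída em formato seguro (ilustrativa):
+# K10<hash-sha256-da-CA>::abcdef.0123456789abcdef
+```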
+
+| Flag                  | Descrição                                                                                                                                           |
+| --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `--data-dir` value    | Pasta para armazenar o estado (padrão: /var/lib/rancher/k3s ou $\{HOME\}/.rancher/k3s se não for root)                                               |
+| `--kubeconfig` value  | Servidor ao qual se conectar [$KUBECONFIG]                                                                                                            |
+| `--description` value | Uma descrição amigável de como este token é usado                                                                                                     |
+| `--groups` value      | Grupos extras com os quais este token autenticará quando usado para autenticação. (padrão: "system:bootstrappers:k3s:default-node-token")            |
+| `--ttl` value         | A duração antes que o token seja excluído automaticamente (por exemplo, 1s, 2m, 3h). Se definido como '0', o token nunca expirará (padrão: 24h0m0s)  |
+| `--usages` value      | Descreve as maneiras pelas quais este token pode ser usado. (padrão: "signing,authentication")                                                        |
+
+#### `k3s token delete`
+
+Exclua um ou mais tokens. O token completo pode ser fornecido, ou apenas o ID do token.
+
+| Flag                 | Descrição                                                                                                |
+| -------------------- | -------------------------------------------------------------------------------------------------------- |
+| `--data-dir` value   | Pasta para armazenar o estado (padrão: /var/lib/rancher/k3s ou $\{HOME\}/.rancher/k3s se não for root)    |
+| `--kubeconfig` value | Servidor ao qual se conectar [$KUBECONFIG]                                                                 |
+
+#### `k3s token generate`
+
+Gere um token de bootstrap aleatório.
+
+Você não precisa usar este comando para gerar um token. Você pode fazer isso sozinho, desde que ele esteja no formato `[a-z0-9]{6}.[a-z0-9]{16}`, onde a primeira parte é o ID do token e a segunda parte é o segredo.
+
+| Flag                 | Descrição                                                                                                |
+| -------------------- | -------------------------------------------------------------------------------------------------------- |
+| `--data-dir` value   | Pasta para armazenar o estado (padrão: /var/lib/rancher/k3s ou $\{HOME\}/.rancher/k3s se não for root)    |
+| `--kubeconfig` value | Servidor ao qual se conectar [$KUBECONFIG]                                                                 |
+
+#### `k3s token list`
+
+Listar os tokens de bootstrap, mostrando seu ID, descrição e tempo de vida restante.
+
+| Flag                 | Descrição                                                                                                |
+| -------------------- | -------------------------------------------------------------------------------------------------------- |
+| `--data-dir` value   | Pasta para armazenar o estado (padrão: /var/lib/rancher/k3s ou $\{HOME\}/.rancher/k3s se não for root)    |
+| `--kubeconfig` value | Servidor ao qual se conectar [$KUBECONFIG]                                                                 |
+| `--output` value     | Formato de saída. Opções válidas: text, json (padrão: "text")                                              |
+
+#### `k3s token rotate`
+
+:::info Nota de Versão
+Disponível a partir das versões de outubro de 2023 (v1.28.2+k3s1, v1.27.7+k3s1, v1.26.10+k3s1, v1.25.15+k3s1).
+:::
+
+Gire o token do servidor original, substituindo-o por um novo token de servidor. Após executar este comando, todos os servidores e quaisquer agentes que ingressaram originalmente com o token antigo devem ser reiniciados com o novo token.
+
+Se você não especificar um novo token, um será gerado para você.
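+
+Um esboço de uso (valores hipotéticos; as flags estão descritas na tabela abaixo):
+
+```bash
+# Gira o token do servidor, informando o token atual e o novo valor desejado
+sudo k3s token rotate --token <token-atual> --new-token <novo-token>
+# Em seguida, reinicie cada servidor e agente que ingressou com o token antigo, usando o novo token
+```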
+
+| Flag                 | Descrição                                                                                                |
+| -------------------- | -------------------------------------------------------------------------------------------------------- |
+| `--data-dir` value   | Pasta para armazenar o estado (padrão: /var/lib/rancher/k3s ou $\{HOME\}/.rancher/k3s se não for root)    |
+| `--kubeconfig` value | Servidor ao qual se conectar [$KUBECONFIG]                                                                 |
+| `--server` value     | Servidor ao qual se conectar (padrão: "https://127.0.0.1:6443") [$K3S_URL]                                 |
+| `--token` value      | Token existente usado para unir um servidor ou agente a um cluster [$K3S_TOKEN]                            |
+| `--new-token` value  | Novo token que substitui o token existente                                                                 |
+
+:::warning
+Os snapshots tirados antes da rotação exigirão o token antigo do servidor ao restaurar o cluster.
+:::
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/cluster-access.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cluster-access.md
new file mode 100644
index 000000000..a862e47ce
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/cluster-access.md
@@ -0,0 +1,24 @@
+---
+title: Acesso ao Cluster
+---
+
+O arquivo kubeconfig armazenado em `/etc/rancher/k3s/k3s.yaml` é usado para configurar o acesso ao cluster Kubernetes. Se você instalou ferramentas de linha de comando do Kubernetes upstream, como kubectl ou helm, precisará configurá-las com o caminho correto do kubeconfig. Isso pode ser feito exportando a variável de ambiente `KUBECONFIG` ou usando o sinalizador de linha de comando `--kubeconfig`. Consulte os exemplos abaixo para obter detalhes.
+
+Utilize a variável de ambiente KUBECONFIG:
+
+```bash
+export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
+kubectl get pods --all-namespaces
+helm ls --all-namespaces
+```
+
+Ou especifique a localização do arquivo kubeconfig no comando:
+
+```bash
+kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml get pods --all-namespaces
+helm --kubeconfig /etc/rancher/k3s/k3s.yaml ls --all-namespaces
+```
+
+### Acessando o Cluster de Fora com kubectl
+
+Copie `/etc/rancher/k3s/k3s.yaml` para a sua máquina localizada fora do cluster como `~/.kube/config`. Em seguida, substitua o valor do campo `server` pelo IP ou nome do seu servidor K3s. O `kubectl` agora pode gerenciar seu cluster K3s.
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/backup-restore.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/backup-restore.md
new file mode 100644
index 000000000..27a889042
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/backup-restore.md
@@ -0,0 +1,34 @@
+---
+title: Backup e Restauração
+---
+
+A forma de fazer backup e restaurar o K3s depende do tipo de armazenamento de dados usado.
+
+:::warning
+Além de fazer backup do próprio datastore, você também deve fazer backup do arquivo de token do servidor em `/var/lib/rancher/k3s/server/token`.
+Você deve restaurar este arquivo, ou passar seu valor para a opção `--token`, ao restaurar do backup.
+Se você não usar o mesmo valor de token ao restaurar, o snapshot ficará inutilizável, pois o token é usado para criptografar dados confidenciais dentro do próprio datastore.
+:::
+
+## Backup e Restauração com SQLite
+
+Não são necessários comandos especiais para fazer backup ou restaurar o armazenamento de dados SQLite.
+
+* Para fazer backup do armazenamento de dados SQLite, faça uma cópia de `/var/lib/rancher/k3s/server/db/`.
+* Para restaurar o armazenamento de dados SQLite, restaure o conteúdo de `/var/lib/rancher/k3s/server/db` (e o token, conforme discutido acima).
+
+## Backup e Restauração com Armazenamento de Dados Externo
+
+Quando um datastore externo é usado, as operações de backup e restauração são realizadas fora do K3s. O administrador do banco de dados precisará fazer backup do banco de dados externo ou restaurá-lo a partir de um snapshot ou dump.
+
+Recomendamos configurar o banco de dados para tirar instantâneos recorrentes.
+
+Para obter detalhes sobre como tirar instantâneos do banco de dados e restaurar seu banco de dados a partir deles, consulte a documentação oficial do banco de dados:
+
+- [Documentação oficial do MySQL](https://dev.mysql.com/doc/refman/8.0/en/replication-snapshot-method.html)
+- [Documentação oficial do PostgreSQL](https://www.postgresql.org/docs/8.3/backup-dump.html)
+- [Documentação oficial do etcd](https://etcd.io/docs/latest/op-guide/recovery/)
+
+## Backup e Restauração com Armazenamento de Dados etcd Incorporado
+
+Consulte a [documentação do comando `k3s etcd-snapshot`](../cli/etcd-snapshot.md) para obter informações sobre como executar operações de backup e restauração no armazenamento de dados etcd incorporado.
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/cluster-loadbalancer.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/cluster-loadbalancer.md
new file mode 100644
index 000000000..f07d66787
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/cluster-loadbalancer.md
@@ -0,0 +1,197 @@
+---
+title: Balanceador de Carga de Cluster
+---
+
+Esta seção descreve como instalar um balanceador de carga externo na frente dos nós de servidor de um cluster K3s de Alta Disponibilidade (HA). Dois exemplos são fornecidos: Nginx e HAProxy.
+
+:::tip
+Os balanceadores de carga externos não devem ser confundidos com o ServiceLB, um controlador incorporado que permite o uso de Kubernetes LoadBalancer Services sem implantar um controlador de balanceador de carga de terceiros. Para mais detalhes, consulte [Service Load Balancer](../networking/networking-services.md#service-load-balancer).
+
+Os balanceadores de carga externos podem ser usados para fornecer um endereço de registro fixo para registrar nós ou para acesso externo ao Kubernetes API Server. Para expor LoadBalancer Services, os balanceadores de carga externos podem ser usados junto com o ServiceLB ou em vez dele, mas na maioria dos casos, controladores de balanceador de carga substitutos, como MetalLB ou Kube-VIP, são uma escolha melhor.
+:::
+
+## Pré-requisitos
+
+Todos os nós neste exemplo estão executando o Ubuntu 20.04.
+
+Para ambos os exemplos, suponha que um [cluster HA K3s com etcd incorporado](../datastore/ha-embedded.md) foi instalado em 3 nós.
+
+Cada servidor k3s é configurado com:
+```yaml
+# /etc/rancher/k3s/config.yaml
+token: lb-cluster-gd
+tls-san: 10.10.10.100
+```
+
+Os nós têm os seguintes nomes de host e IPs:
+* server-1: `10.10.10.50`
+* server-2: `10.10.10.51`
+* server-3: `10.10.10.52`
+
+
+Dois nós adicionais para balanceamento de carga são configurados com os nomes de host e IPs:
+* lb-1: `10.10.10.98`
+* lb-2: `10.10.10.99`
+
+Existem três nós adicionais com os nomes de host e IPs:
+* agent-1: `10.10.10.101`
+* agent-2: `10.10.10.102`
+* agent-3: `10.10.10.103`
+
+## Configurar o Balanceador de Carga
+
+### HAProxy
+
+[HAProxy](http://www.haproxy.org/) é uma opção de código aberto que fornece um balanceador de carga TCP.
Ele também suporta HA para o próprio balanceador de carga, garantindo redundância em todos os níveis. Veja a [Documentação do HAProxy](http://docs.haproxy.org/2.8/intro.html) para mais informações.
+
+Além disso, usaremos o KeepAlived para gerar um IP virtual (VIP) que será usado para acessar o cluster. Veja a [Documentação do KeepAlived](https://www.keepalived.org/manpage.html) para mais informações.
+
+
+
+1) Instale o HAProxy e o KeepAlived:
+
+```bash
+sudo apt-get install haproxy keepalived
+```
+
+2) Adicione o seguinte em `/etc/haproxy/haproxy.cfg` em lb-1 e lb-2:
+
+```
+frontend k3s-frontend
+    bind *:6443
+    mode tcp
+    option tcplog
+    default_backend k3s-backend
+
+backend k3s-backend
+    mode tcp
+    option tcp-check
+    balance roundrobin
+    default-server inter 10s downinter 5s
+    server server-1 10.10.10.50:6443 check
+    server server-2 10.10.10.51:6443 check
+    server server-3 10.10.10.52:6443 check
+```
+3) Adicione o seguinte em `/etc/keepalived/keepalived.conf` em lb-1 e lb-2:
+
+```
+global_defs {
+  enable_script_security
+  script_user root
+}
+
+vrrp_script chk_haproxy {
+    script 'killall -0 haproxy' # faster than pidof
+    interval 2
+}
+
+vrrp_instance haproxy-vip {
+    interface eth1
+    state <STATE> # MASTER on lb-1, BACKUP on lb-2
+    priority <PRIORITY> # 200 on lb-1, 100 on lb-2
+
+    virtual_router_id 51
+
+    virtual_ipaddress {
+        10.10.10.100/24
+    }
+
+    track_script {
+        chk_haproxy
+    }
+}
+```
+
+4) Reinicie o HAProxy e o KeepAlived em lb-1 e lb-2:
+
+```bash
+systemctl restart haproxy
+systemctl restart keepalived
+```
+
+5) Em agent-1, agent-2 e agent-3, execute o seguinte comando para instalar o k3s e ingressar no cluster:
+
+```bash
+curl -sfL https://get.k3s.io | K3S_TOKEN=lb-cluster-gd sh -s - agent --server https://10.10.10.100:6443
+```
+
+Agora você pode usar o `kubectl` a partir de um nó de servidor para interagir com o cluster.
+```bash
+root@server-1 $ k3s kubectl get nodes -A
+NAME       STATUS   ROLES                       AGE     VERSION
+agent-1    Ready    <none>                      32s     v1.27.3+k3s1
+agent-2    Ready    <none>                      20s     v1.27.3+k3s1
+agent-3    Ready    <none>                      9s      v1.27.3+k3s1
+server-1   Ready    control-plane,etcd,master   4m22s   v1.27.3+k3s1
+server-2   Ready    control-plane,etcd,master   3m58s   v1.27.3+k3s1
+server-3   Ready    control-plane,etcd,master   3m12s   v1.27.3+k3s1
+```
+
+
+
+
+
+## Nginx Load Balancer
+
+:::danger
+O Nginx não suporta nativamente uma configuração de Alta Disponibilidade (HA). Se estiver configurando um cluster HA, ter um único balanceador de carga na frente do K3s reintroduzirá um único ponto de falha.
+:::
+
+O [Nginx Open Source](http://nginx.org/) fornece um balanceador de carga TCP. Veja [Usando nginx como balanceador de carga HTTP](https://nginx.org/en/docs/http/load_balancing.html) para mais informações.
+
+1) Crie um arquivo `nginx.conf` em lb-1 com o seguinte conteúdo:
+
+```
+events {}
+
+stream {
+  upstream k3s_servers {
+    server 10.10.10.50:6443;
+    server 10.10.10.51:6443;
+    server 10.10.10.52:6443;
+  }
+
+  server {
+    listen 6443;
+    proxy_pass k3s_servers;
+  }
+}
+```
+
+2) Execute o balanceador de carga Nginx em lb-1:
+
+Usando o Docker:
+
+```bash
+docker run -d --restart unless-stopped \
+    -v ${PWD}/nginx.conf:/etc/nginx/nginx.conf \
+    -p 6443:6443 \
+    nginx:stable
+```
+
+Ou [instale o nginx](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-open-source/) e então execute:
+
+```bash
+cp nginx.conf /etc/nginx/nginx.conf
+systemctl start nginx
+```
+
+3) Em agent-1, agent-2 e agent-3, execute o seguinte comando para instalar o k3s e ingressar no cluster:
+
+```bash
+curl -sfL https://get.k3s.io | K3S_TOKEN=lb-cluster-gd sh -s - agent --server https://10.10.10.98:6443
+```
+
+Agora você pode usar o `kubectl` a partir de um nó de servidor para interagir com o cluster.
+```bash
+root@server1 $ k3s kubectl get nodes -A
+NAME       STATUS   ROLES                       AGE     VERSION
+agent-1    Ready    <none>                      30s     v1.27.3+k3s1
+agent-2    Ready    <none>                      22s     v1.27.3+k3s1
+agent-3    Ready    <none>                      13s     v1.27.3+k3s1
+server-1   Ready    control-plane,etcd,master   4m49s   v1.27.3+k3s1
+server-2   Ready    control-plane,etcd,master   3m58s   v1.27.3+k3s1
+server-3   Ready    control-plane,etcd,master   3m16s   v1.27.3+k3s1
+```
+
+
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/datastore.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/datastore.md
new file mode 100644
index 000000000..c16ff661c
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/datastore.md
@@ -0,0 +1,90 @@
+---
+title: "Armazenamento de Dados do Cluster"
+---
+
+A capacidade de executar o Kubernetes usando um datastore diferente do etcd diferencia o K3s de outras distribuições do Kubernetes. Esse recurso fornece flexibilidade aos operadores do Kubernetes. As opções de datastore disponíveis permitem que você selecione o datastore que melhor se adapta ao seu caso de uso. Por exemplo:
+
+* Se sua equipe não tem experiência em operar o etcd, você pode escolher um banco de dados SQL de nível empresarial, como MySQL ou PostgreSQL
+* Se você precisa executar um cluster simples e de curta duração em seu ambiente de CI/CD, você pode usar o banco de dados SQLite incorporado
+* Se você deseja implantar o Kubernetes na borda e precisa de uma solução altamente disponível, mas não pode arcar com a sobrecarga operacional de gerenciar um banco de dados na borda, você pode usar o datastore HA incorporado do K3s, construído sobre o etcd incorporado.
+
+O K3s oferece suporte às seguintes opções de armazenamento de dados:
+
+* **Embedded [SQLite](https://www.sqlite.org/index.html)**
+  O SQLite não pode ser usado em clusters com vários servidores.
+  O SQLite é o datastore padrão e será usado se nenhuma outra configuração de datastore estiver presente e nenhum arquivo de banco de dados do etcd incorporado estiver presente no disco.
+* **Embedded etcd**
+  Consulte a documentação do [High Availability Embedded etcd](ha-embedded.md) para obter mais informações sobre o uso do etcd incorporado com vários servidores.
+  O etcd incorporado será selecionado automaticamente se o K3s estiver configurado para inicializar um novo cluster etcd, ingressar em um cluster etcd existente, ou se os arquivos de banco de dados do etcd estiverem presentes no disco durante a inicialização.
+* **External Database**
+  Consulte a documentação do [High Availability External DB](ha.md) para obter mais informações sobre o uso de datastores externos com vários servidores.
+  Os seguintes datastores externos são suportados:
+  * [etcd](https://etcd.io/) (certificado em relação à versão 3.5.4)
+  * [MySQL](https://www.mysql.com) (certificado em relação às versões 5.7 e 8.0)
+  * [MariaDB](https://mariadb.org/) (certificado em relação à versão 10.6.8)
+  * [PostgreSQL](https://www.postgresql.org/) (certificado em relação às versões 12.16, 13.12, 14.9 e 15.4)
+
+:::warning Suporte a Instruções Preparadas
+O K3s requer suporte a instruções preparadas (prepared statements) do banco de dados. Isso significa que poolers de conexão como o [PgBouncer](https://www.pgbouncer.org/faq.html#how-to-use-prepared-statements-with-transaction-pooling) podem exigir configuração adicional para funcionar com o K3s.
+:::
+
+### Parâmetros de Configuração do Armazenamento de Dados Externo
+Se você deseja usar um datastore externo como PostgreSQL, MySQL ou etcd, você deve definir o parâmetro `datastore-endpoint` para que o K3s saiba como se conectar a ele. Você também pode especificar parâmetros para configurar a autenticação e a criptografia da conexão. A tabela abaixo resume esses parâmetros, que podem ser passados como flags de CLI ou variáveis de ambiente.
+
+| CLI Flag               | Variável de Ambiente     | Descrição                                                                                                                                                                                                                                                                                                                                                  |
+| ---------------------- | ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `--datastore-endpoint` | `K3S_DATASTORE_ENDPOINT` | Especifique uma string de conexão PostgreSQL, MySQL ou etcd. Esta é uma string usada para descrever a conexão com o datastore. A estrutura desta string é específica para cada backend e é detalhada abaixo.                                                                                                                                               |
+| `--datastore-cafile`   | `K3S_DATASTORE_CAFILE`   | Arquivo de Autoridade de Certificação (CA) TLS usado para ajudar a proteger a comunicação com o datastore. Se o seu datastore atende solicitações por TLS usando um certificado assinado por uma autoridade de certificação personalizada, você pode especificar essa CA usando este parâmetro para que o cliente K3s possa verificar o certificado corretamente. |
+| `--datastore-certfile` | `K3S_DATASTORE_CERTFILE` | Arquivo de certificado TLS usado para autenticação baseada em certificado de cliente no seu datastore. Para usar esse recurso, seu datastore deve estar configurado para suportar autenticação baseada em certificado de cliente. Se você especificar esse parâmetro, também deverá especificar o parâmetro `datastore-keyfile`.                              |
+| `--datastore-keyfile`  | `K3S_DATASTORE_KEYFILE`  | Arquivo de chave TLS usado para autenticação baseada em certificado de cliente no seu datastore. Veja o parâmetro `datastore-certfile` acima para mais detalhes.                                                                                                                                                                                           |
+
+Como prática recomendada, defina esses parâmetros como variáveis de ambiente, em vez de argumentos de linha de comando, para que suas credenciais de banco de dados ou outras informações confidenciais não sejam expostas como parte das informações do processo.
+
+### Formato e Funcionalidade do Ponto de Extremidade do Datastore
+Conforme mencionado, o formato do valor passado para o parâmetro `datastore-endpoint` depende do backend do datastore.
O seguinte detalha esse formato e funcionalidade para cada datastore externo suportado.
+
+
+
+  Em sua forma mais comum, o parâmetro `datastore-endpoint` para PostgreSQL tem o seguinte formato:
+
+  `postgres://username:password@hostname:port/database-name`
+
+  Parâmetros de configuração mais avançados estão disponíveis. Para obter mais informações sobre eles, consulte https://godoc.org/github.com/lib/pq.
+
+  Se você especificar um nome de banco de dados e ele não existir, o servidor tentará criá-lo.
+
+  Se você fornecer apenas `postgres://` como o endpoint, o K3s tentará fazer o seguinte:
+
+  - Conectar ao localhost usando `postgres` como nome de usuário e senha
+  - Criar um banco de dados chamado `kubernetes`
+
+
+
+  Em sua forma mais comum, o parâmetro `datastore-endpoint` para MySQL e MariaDB tem o seguinte formato:
+
+  `mysql://username:password@tcp(hostname:3306)/database-name`
+
+  Parâmetros de configuração mais avançados estão disponíveis. Para obter mais informações sobre eles, consulte https://github.com/go-sql-driver/mysql#dsn-data-source-name
+
+  Observe que, devido a um [problema conhecido](https://github.com/k3s-io/k3s/issues/1093) no K3s, você não pode definir o parâmetro `tls`. A comunicação TLS é suportada, mas você não pode, por exemplo, definir este parâmetro como "skip-verify" para fazer com que o K3s ignore a verificação do certificado.
+
+  Se você especificar um nome de banco de dados e ele não existir, o servidor tentará criá-lo.
+
+  Se você fornecer apenas `mysql://` como endpoint, o K3s tentará fazer o seguinte:
+
+  - Conectar ao socket do MySQL em `/var/run/mysqld/mysqld.sock` usando o usuário `root` e nenhuma senha
+  - Criar um banco de dados com o nome `kubernetes`
+
+
+
+  Em sua forma mais comum, o parâmetro `datastore-endpoint` para etcd tem o seguinte formato:
+
+  `https://etcd-host-1:2379,https://etcd-host-2:2379,https://etcd-host-3:2379`
+
+  O exemplo acima pressupõe um cluster etcd típico de três nós. O parâmetro aceita uma ou mais URLs etcd separadas por vírgula.
+
+
+
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/ha-embedded.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/ha-embedded.md
new file mode 100644
index 000000000..807120470
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/ha-embedded.md
@@ -0,0 +1,72 @@
+---
+title: "Alta Disponibilidade com etcd Incorporado"
+---
+
+:::warning
+O etcd incorporado (HA) pode ter problemas de desempenho em discos mais lentos, como Raspberry Pis executando com cartões SD.
+:::
+
+<details>
+<summary>Por que um número ímpar de nós de servidor?</summary>
+
+O cluster etcd incorporado em HA deve ser composto por um número ímpar de nós de servidor para que o etcd mantenha o quorum. Para um cluster com n servidores, o quorum é (n/2)+1, com divisão inteira. Para qualquer cluster de tamanho ímpar, adicionar um nó sempre aumenta o número de nós necessários para o quorum. Embora adicionar um nó a um cluster de tamanho ímpar pareça melhor, por haver mais máquinas, a tolerância a falhas fica pior: exatamente o mesmo número de nós pode falhar sem perder o quorum, mas agora há mais nós que podem falhar.
+
+</details>
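+
+Para ilustrar a regra de quorum descrita acima (quorum = (n/2)+1, com divisão inteira):
+
+| Servidores (n) | Quorum | Falhas toleradas |
+| -------------- | ------ | ---------------- |
+| 3              | 2      | 1                |
+| 4              | 3      | 1                |
+| 5              | 3      | 2                |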
+
+Um cluster HA K3s com etcd incorporado é composto de:
+
+- Três ou mais **nós de servidor** que servirão a API do Kubernetes e executarão outros serviços do plano de controle, além de hospedar o armazenamento de dados etcd incorporado.
+- Opcional: Zero ou mais **nós de agente** designados para executar seus aplicativos e serviços
+- Opcional: Um **endereço de registro fixo** para os nós de agente se registrarem no cluster
+
+:::note
+Para implantar rapidamente grandes clusters HA, consulte [Projetos Relacionados](../related-projects.md)
+:::
+
+Para começar, primeiro inicie um nó de servidor com o sinalizador `cluster-init`, para habilitar o cluster, e um token, que será usado como segredo compartilhado para unir servidores adicionais ao cluster.
+
+```bash
+curl -sfL https://get.k3s.io | K3S_TOKEN=SECRET sh -s - server \
+    --cluster-init \
+    --tls-san=<FIXED_IP> # Opcional, necessário se estiver usando um endereço de registro fixo
+```
+
+Após iniciar o primeiro servidor, junte o segundo e o terceiro servidores ao cluster usando o segredo compartilhado:
+
+```bash
+curl -sfL https://get.k3s.io | K3S_TOKEN=SECRET sh -s - server \
+    --server https://<ip ou hostname do server1>:6443 \
+    --tls-san=<FIXED_IP> # Opcional, necessário se estiver usando um endereço de registro fixo
+```
+
+Verifique se o segundo e o terceiro servidores agora fazem parte do cluster:
+
+```bash
+$ kubectl get nodes
+NAME      STATUS   ROLES                       AGE   VERSION
+server1   Ready    control-plane,etcd,master   28m   vX.Y.Z
+server2   Ready    control-plane,etcd,master   13m   vX.Y.Z
+server3   Ready    control-plane,etcd,master   10m   vX.Y.Z
+```
+
+Agora você tem um plano de controle altamente disponível. Qualquer servidor associado ao cluster com sucesso pode ser usado no argumento `--server` para unir nós de servidor e de agente adicionais. Unir nós de agente adicionais ao cluster segue o mesmo procedimento dos servidores:
+
+```bash
+curl -sfL https://get.k3s.io | K3S_TOKEN=SECRET sh -s - agent --server https://<ip ou hostname do servidor>:6443
+```
+
+Existem alguns sinalizadores de configuração que devem ser os mesmos em todos os nós de servidor:
+
+- Sinalizadores relacionados à rede: `--cluster-dns`, `--cluster-domain`, `--cluster-cidr`, `--service-cidr`
+- Sinalizadores que controlam a implantação de certos componentes: `--disable-helm-controller`, `--disable-kube-proxy`, `--disable-network-policy` e qualquer componente passado para `--disable`
+- Sinalizadores relacionados a recursos: `--secrets-encryption`
+
+## Clusters de Nó Único
+
+:::info Nota de Versão
+Disponível a partir de [v1.22.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.22.2%2Bk3s1)
+:::
+
+Se você tiver um cluster existente usando o banco de dados SQLite incorporado padrão, você pode convertê-lo para etcd simplesmente reiniciando seu servidor K3s com o sinalizador `--cluster-init`. Depois de fazer isso, você poderá adicionar instâncias adicionais conforme descrito acima.
+
+Se um armazenamento de dados etcd for encontrado no disco, porque esse nó já inicializou ou ingressou em um cluster anteriormente, os argumentos de armazenamento de dados (`--cluster-init`, `--server`, `--datastore-endpoint` etc.) serão ignorados.
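+
+Um esboço mínimo dessa conversão, assumindo uma instalação existente feita pelo script de instalação:
+
+```bash
+# Reinicia o servidor K3s com --cluster-init para migrar do SQLite para o etcd incorporado
+curl -sfL https://get.k3s.io | sh -s - server --cluster-init
+```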
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/ha.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/ha.md
new file mode 100644
index 000000000..6b79bf95c
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/datastore/ha.md
@@ -0,0 +1,105 @@
+---
+title: Banco de Dados Externo de Alta Disponibilidade
+---
+
+Esta seção descreve como instalar um cluster K3s de alta disponibilidade com um banco de dados externo.
+
+:::note
+Para implantar rapidamente grandes clusters HA, consulte [Projetos Relacionados](/related-projects)
+:::
+
+Clusters de servidor único podem atender a uma variedade de casos de uso, mas para ambientes onde o tempo de atividade do plano de controle do Kubernetes é crítico, você pode executar o K3s em uma configuração HA. Um cluster HA K3s é composto de:
+
+- Dois ou mais **nós de servidor** que servirão a API do Kubernetes e executarão outros serviços do plano de controle
+- Um **armazenamento de dados externo** (em oposição ao armazenamento de dados SQLite incorporado usado em configurações de servidor único)
+- Opcional: Zero ou mais **nós de agente** designados para executar seus aplicativos e serviços
+- Opcional: Um **endereço de registro fixo** para os nós de agente se registrarem no cluster
+
+Para mais detalhes sobre como esses componentes funcionam juntos, consulte a [seção de arquitetura](../architecture.md#high-availability-k3s).
+
+## Esboço da Instalação
+
+A configuração de um cluster HA requer as seguintes etapas:
+
+### 1. Criar um Datastore Externo
+
+Primeiro, você precisará criar um datastore externo para o cluster. Veja a documentação de [Opções de Datastore do Cluster](datastore.md) para mais detalhes.
+
+### 2. Inicializar os Nós de Servidor
+
+O K3s requer dois ou mais nós de servidor para esta configuração HA. Consulte o guia de [Requisitos](../installation/requirements.md) para obter os requisitos mínimos de máquina.
+
+Ao executar o comando `k3s server` nesses nós, você deve definir o parâmetro `datastore-endpoint` para que o K3s saiba como se conectar ao datastore externo. O parâmetro `token` também pode ser usado para definir um token determinístico ao adicionar nós. Quando vazio, esse token será gerado automaticamente para uso posterior.
+
+Por exemplo, um comando como o seguinte pode ser usado para instalar o servidor K3s com um banco de dados MySQL como armazenamento de dados externo e [definir um token](../cli/server.md#cluster-options):
+
+```bash
+curl -sfL https://get.k3s.io | sh -s - server \
+    --token=SECRET \
+    --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" \
+    --tls-san=<FIXED_IP> # Opcional, necessário se estiver usando um endereço de registro fixo
+```
+
+O formato do ponto de extremidade do datastore difere com base no tipo de banco de dados. Para obter detalhes, consulte a seção sobre [formatos de ponto de extremidade do datastore](../datastore/datastore.md#datastore-endpoint-format-and-functionality).
+
+Para configurar certificados TLS ao iniciar nós de servidor, consulte o [guia de configuração do datastore](../datastore/datastore.md#external-datastore-configuration-parameters).
+
+:::note
+As mesmas opções de instalação disponíveis para instalações de servidor único também estão disponíveis para instalações de alta disponibilidade. Para mais detalhes, consulte a documentação de [Opções de Configuração](../installation/configuration.md).
+::: + +Por padrão, os nós do servidor serão agendáveis ​​e, portanto, suas cargas de trabalho poderão ser iniciadas neles. Se você deseja ter um plano de controle dedicado onde nenhuma carga de trabalho do usuário será executada, você pode usar taints. O parâmetro `node-taint` permitirá que você configure nós com taints, por exemplo `--node-taint CriticalAddonsOnly=true:NoExecute`. + +Depois de iniciar o processo `k3s server` em todos os nós do servidor, certifique-se de que o cluster tenha sido iniciado corretamente com `k3s kubectl get nodes`. Você deve ver os nós do servidor no estado Ready. + +### 3. Opcional: Adicione a Nós de Servidor + +O mesmo comando de exemplo na Etapa 2 pode ser usado para unir nós de servidor adicionais, onde o token do primeiro nó precisa ser usado. + +Se o primeiro nó do servidor foi iniciado sem o sinalizador CLI `--token` ou a variável `K3S_TOKEN`, o valor do token pode ser recuperado de qualquer servidor já associado ao cluster: + +```bash +cat /var/lib/rancher/k3s/server/token +``` + +Nós de servidor adicionais podem ser adicionados [usando o token](../cli/server.md#cluster-options): + +```bash +curl -sfL https://get.k3s.io | sh -s - server \ + --token=SECRET \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" +``` + +Existem alguns sinalizadores de configuração que devem ser os mesmos em todos os nós do servidor: + +- Sinalizadores relacionados à rede: `--cluster-dns`, `--cluster-domain`, `--cluster-cidr`, `--service-cidr` +- Sinalizadores que controlam a implantação de certos componentes: `--disable-helm-controller`, `--disable-kube-proxy`, `--disable-network-policy` e qualquer componente passado para `--disable` +- Sinalizadores relacionados ao recurso: `--secrets-encryption` + +:::note +Certifique-se de manter uma cópia deste token, pois ele é necessário ao restaurar do backup e adicionar nós. Anteriormente, o K3s não impunha o uso de um token ao usar datastores SQL externos. +::: + +### 4. Opcional: Configurar um Endereço de Registro Fixo + +Os nós de agente precisam de uma URL para registrar. Pode ser o IP ou o nome do host de qualquer nó de servidor, mas em muitos casos eles podem mudar ao longo do tempo. Por exemplo, se estiver executando seu cluster em uma nuvem que suporta grupos de dimensionamento, os nós podem ser criados e destruídos ao longo do tempo, mudando para IPs diferentes do conjunto inicial de nós de servidor. Seria melhor ter um ponto de extremidade estável na frente dos nós de servidor que não mudará ao longo do tempo. Este ponto de extremidade pode ser configurado usando qualquer número de abordagens, como: + +- Um balanceador de carga de camada 4 (TCP) +- DNS round-robin +- Endereços IP virtuais ou elásticos + +Consulte [Cluster Loadbalancer](./cluster-loadbalancer.md) para obter exemplos de configurações. + +Este endpoint também pode ser usado para acessar a API do Kubernetes. Então você pode, por exemplo, modificar seu arquivo [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) para apontar para ele em vez de um nó específico. + +Para evitar erros de certificado em tal configuração, você deve configurar o servidor com a opção `--tls-san=YOUR_IP_OR_HOSTNAME_HERE`. Esta opção adiciona um nome de host ou IP adicional como um Nome Alternativo de Assunto no certificado TLS, e pode ser especificado várias vezes se você quiser acessar por meio do IP e do nome do host. + +### 5. 
+
+Como os nós de servidor do K3s são agendáveis por padrão, nós de agente não são necessários para um cluster HA K3s. No entanto, você pode desejar ter nós de agente dedicados para executar seus aplicativos e serviços.
+
+Unir nós de agente em um cluster HA é o mesmo que unir nós de agente em um cluster de servidor único. Você só precisa especificar a URL na qual o agente deve se registrar (um dos IPs dos servidores ou um endereço de registro fixo) e o token que ele deve usar.
+
+```bash
+K3S_TOKEN=SECRET k3s agent --server https://server-or-fixed-registration-address:6443
+```
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/faq.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/faq.md
new file mode 100644
index 000000000..a66a5260c
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/faq.md
@@ -0,0 +1,72 @@
+---
+title: FAQ
+---
+
+As Perguntas Frequentes são atualizadas periodicamente e projetadas para responder às perguntas mais comuns dos nossos usuários sobre o K3s.
+
+### O K3s é uma substituição adequada para o Kubernetes?
+
+O K3s é uma distribuição Kubernetes certificada pela CNCF e pode fazer tudo o que é necessário em um cluster Kubernetes padrão. É apenas uma versão mais leve. Veja a página de documentação [principal](./introduction.md) para mais detalhes.
+
+### Como posso usar meu próprio Ingress em vez do Traefik?
+
+Basta iniciar o servidor K3s com `--disable=traefik` e implantar seu ingress.
+
+### O K3s é compatível com Windows?
+
+No momento, o K3s não oferece suporte nativo ao Windows, mas estamos abertos a essa ideia no futuro.
+
+### O que exatamente são Servers e Agents?
+
+Para uma análise dos componentes que compõem um servidor e um agente, consulte a [página de Arquitetura](./architecture.md).
+
+### Como posso compilar a partir do código-fonte?
+
+Consulte o [BUILDING.md](https://github.com/k3s-io/k3s/blob/master/BUILDING.md) do K3s para instruções.
+
+### Onde estão os logs do K3s?
+
+A localização dos logs do K3s varia dependendo de como você executa o K3s e do sistema operacional do nó.
+
+* Quando executado a partir da linha de comando, os logs são enviados para stdout e stderr.
+* Quando executado sob openrc, os logs serão criados em `/var/log/k3s.log`.
+* Quando executado sob systemd, os logs serão enviados para o journald e podem ser visualizados usando `journalctl -u k3s`.
+* Os logs dos pods podem ser encontrados em `/var/log/pods`.
+* Os logs do containerd podem ser encontrados em `/var/lib/rancher/k3s/agent/containerd/containerd.log`.
+
+Você pode gerar logs mais detalhados usando o sinalizador `--debug` ao iniciar o K3s (ou `debug: true` no arquivo de configuração).
+
+O Kubernetes usa uma estrutura de registro conhecida como `klog`, que usa uma única configuração de registro para todos os componentes dentro de um processo.
+Como o K3s executa todos os componentes do Kubernetes dentro de um único processo, não é possível configurar níveis de registro ou destinos diferentes para componentes individuais do Kubernetes.
+O uso dos argumentos de componente `-v=<level>` ou `--vmodule=<module>=<level>` provavelmente não terá o efeito desejado.
+
+Veja [Fontes Adicionais de Log](./advanced.md#additional-logging-sources) para ainda mais opções de registro.
+
+### Posso executar o K3s no Docker?
+
+Sim, há várias maneiras de executar o K3s no Docker. Veja [Opções avançadas](./advanced.md#running-k3s-in-docker) para mais detalhes.
+
+### Qual é a diferença entre os Tokens de Servidor e de Agente do K3s?
+
+Para mais informações sobre como gerenciar os tokens de junção do K3s, consulte a [documentação do comando `k3s token`](./cli/token.md).
+
+### Quão compatíveis são as diferentes versões do K3s?
+
+Em geral, aplica-se a [política de variação de versão do Kubernetes](https://kubernetes.io/releases/version-skew-policy/).
+
+Resumindo, os servidores podem ser mais novos que os agentes, mas os agentes não podem ser mais novos que os servidores.
+
+### Estou com um problema. Onde posso obter ajuda?
+
+Se você estiver tendo problemas com a sua implantação do K3s, você deve:
+
+1) Verificar a página de [Problemas conhecidos](./known-issues.md).
+
+2) Verificar se você resolveu qualquer [Preparação adicional do SO](./installation/requirements.md#operating-systems). Execute `k3s check-config` e certifique-se de que ele passe.
+
+3) Pesquisar os [Problemas](https://github.com/k3s-io/k3s/issues) e as [Discussões](https://github.com/k3s-io/k3s/discussions) do K3s para encontrar algo que corresponda ao seu problema.
+
+
+4) Entrar no canal do K3s no [Rancher Slack](https://slack.rancher.io/) para obter ajuda.
+
+5) Enviar um [Novo problema](https://github.com/k3s-io/k3s/issues/new/choose) no GitHub do K3s descrevendo sua configuração e o problema que você está enfrentando.
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/figure.jsx b/i18n/pt-BR/docusaurus-plugin-content-docs/current/figure.jsx
new file mode 100644
index 000000000..cb3966c19
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/figure.jsx
@@ -0,0 +1,11 @@
+import React from 'react'
+import useBaseUrl from '@docusaurus/useBaseUrl'
+
+export default function Figure({ src, caption }) {
+  return (
+    <figure>
+      <img src={useBaseUrl(src)} alt={caption} />
+      <figcaption>{`Figure: ${caption}`}</figcaption>
+    </figure>
+  )
+}
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/helm.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/helm.md
new file mode 100644
index 000000000..43d6f9086
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/helm.md
@@ -0,0 +1,149 @@
+---
+title: Helm
+---
+
+O Helm é a ferramenta de gerenciamento de pacotes preferida para o Kubernetes. Os Helm charts fornecem sintaxe de modelo para os documentos de manifesto YAML do Kubernetes. Com o Helm, desenvolvedores ou administradores de cluster podem criar modelos configuráveis, conhecidos como charts, em vez de usar apenas manifestos estáticos. Para obter mais informações sobre como criar seu próprio catálogo de charts, confira a documentação em [https://helm.sh/docs/intro/quickstart/](https://helm.sh/docs/intro/quickstart/).
+
+O K3s não requer nenhuma configuração especial para suportar o Helm. Apenas certifique-se de ter definido corretamente o caminho do kubeconfig conforme a documentação de [acesso ao cluster](./cluster-access.md).
+
+O K3s inclui um [Helm Controller](https://github.com/k3s-io/helm-controller/) que gerencia a instalação, atualização/reconfiguração e desinstalação de Helm charts usando a Definição de Recurso Personalizada (CRD) HelmChart. Emparelhado com os [manifestos AddOn de implantação automática](./installation/packaged-components.md), a instalação de um Helm chart no seu cluster pode ser automatizada criando um único arquivo no disco.
+
+### Usando o Controlador Helm
+
+O [recurso personalizado HelmChart](https://github.com/k3s-io/helm-controller#helm-controller) captura a maioria das opções que você normalmente passaria para a ferramenta de linha de comando `helm`. Aqui está um exemplo de como você pode implantar o Apache a partir do repositório de charts da Bitnami, substituindo alguns dos valores padrão do chart. Observe que o próprio recurso HelmChart está no namespace `kube-system`, mas os recursos do chart serão implantados no namespace `web`, que é criado no mesmo manifesto. Isso pode ser útil se você quiser manter seus recursos HelmChart separados dos recursos que eles implantam.
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: web
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: apache
+  namespace: kube-system
+spec:
+  repo: https://charts.bitnami.com/bitnami
+  chart: apache
+  targetNamespace: web
+  valuesContent: |-
+    service:
+      type: ClusterIP
+    ingress:
+      enabled: true
+      hostname: www.example.com
+    metrics:
+      enabled: true
+```
+
+Um exemplo de implantação de um Helm chart a partir de um repositório privado com autenticação:
+
+```yaml
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  namespace: kube-system
+  name: example-app
+spec:
+  targetNamespace: example-space
+  createNamespace: true
+  version: v1.2.3
+  chart: example-app
+  repo: https://secure-repo.example.com
+  authSecret:
+    name: example-repo-auth
+  repoCAConfigMap:
+    name: example-repo-ca
+  valuesContent: |-
+    image:
+      tag: v1.2.2
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  namespace: kube-system
+  name: example-repo-auth
+type: kubernetes.io/basic-auth
+stringData:
+  username: user
+  password: pass
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: kube-system
+  name: example-repo-ca
+data:
+  ca.crt: |-
+    -----BEGIN CERTIFICATE-----
+    
+    -----END CERTIFICATE-----
+```
+
+#### Definições de Campo do HelmChart
+
+| Campo                     | Padrão    | Descrição                                                                                                                                                                          | Argumento do Helm / Equivalente de sinalizador |
+| ------------------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------- |
+| metadata.name             |           | Nome do Helm chart                                                                                                                                                                 | NAME                                           |
+| spec.chart                |           | Nome do Helm chart no repositório, ou URL HTTPS completa do arquivo do chart (.tgz)                                                                                               | CHART                                          |
+| spec.targetNamespace      | default   | Namespace de destino do Helm chart                                                                                                                                                 | `--namespace`                                  |
+| spec.createNamespace      | false     | Cria o namespace de destino se não estiver presente                                                                                                                                | `--create-namespace`                           |
+| spec.version              |           | Versão do Helm chart (ao instalar do repositório)                                                                                                                                  | `--version`                                    |
+| spec.repo                 |           | URL do repositório do Helm chart                                                                                                                                                   | `--repo`                                       |
+| spec.repoCA               |           | Verifica certificados de servidores habilitados para HTTPS usando este pacote CA. Deve ser uma string contendo um ou mais certificados CA codificados em PEM.                      | `--ca-file`                                    |
+| spec.repoCAConfigMap      |           | Referência a um ConfigMap contendo certificados CA confiáveis pelo Helm. Pode ser usado junto com `repoCA` ou em vez dele                                                          | `--ca-file`                                    |
+| spec.helmVersion          | v3        | Versão do Helm a ser usada (`v2` ou `v3`)                                                                                                                                          |                                                |
+| spec.bootstrap            | False     | Defina como True se este chart for necessário para inicializar o cluster (Cloud Controller Manager etc.)                                                                           |                                                |
+| spec.set                  |           | Substitui valores padrão simples do chart. Eles têm precedência sobre as opções definidas via valuesContent.                                                                       | `--set` / `--set-string`                       |
+| spec.jobImage             |           | Especifique a imagem a ser usada ao instalar o Helm chart. Por exemplo, rancher/klipper-helm:v0.3.0.                                                                               |                                                |
+| spec.backOffLimit         | 1000      | Especifique o número de tentativas antes de considerar que um job falhou.                                                                                                          |                                                |
+| spec.timeout              | 300s      | Tempo limite para operações do Helm, como uma [string de duração](https://pkg.go.dev/time#ParseDuration) (`300s`, `10m`, `1h`, etc)                                                | `--timeout`                                    |
+| spec.failurePolicy        | reinstall | Defina como `abort`, caso em que a operação do Helm é abortada, aguardando intervenção manual do operador.                                                                         |                                                |
+| spec.authSecret | | Referência a um Secret do tipo `kubernetes.io/basic-auth` que contém credenciais de autenticação básica para o repositório de charts. | |
+| spec.authPassCredentials | false | Passa as credenciais de autenticação básica para todos os domínios. | `--pass-credentials` |
+| spec.dockerRegistrySecret | | Referência a um Secret do tipo `kubernetes.io/dockerconfigjson` que contém credenciais de autenticação do Docker para o registro baseado em OCI que atua como repositório de charts. | |
+| spec.valuesContent | | Substitui valores padrão complexos do chart via conteúdo de arquivo YAML | `--values` |
+| spec.chartContent | | Arquivo .tgz do chart codificado em Base64; substitui spec.chart | CHART |
+
+O conteúdo colocado em `/var/lib/rancher/k3s/server/static/` pode ser acessado anonimamente por meio do APIServer do Kubernetes de dentro do cluster. Essa URL pode ser modelada usando a variável especial `%{KUBERNETES_API}%` no campo `spec.chart`. Por exemplo, o componente Traefik empacotado carrega seu chart de `https://%{KUBERNETES_API}%/static/charts/traefik-12.0.000.tgz`.
+
+:::note
+O campo `name` deve seguir as convenções de nomenclatura de charts do Helm. Consulte a [documentação de práticas recomendadas do Helm](https://helm.sh/docs/chart_best_practices/conventions/#chart-names) para saber mais.
+:::
+
+### Personalizando Componentes Empacotados com HelmChartConfig
+
+Para permitir a substituição de valores de componentes empacotados que são implantados como HelmCharts (como o Traefik), o K3s oferece suporte à personalização dessas implantações por meio de recursos HelmChartConfig. O recurso HelmChartConfig deve corresponder ao nome e ao namespace do HelmChart correspondente e oferece suporte ao fornecimento de `valuesContent` adicional, que é passado ao comando `helm` como um arquivo de valores adicional.
+
+:::note
+Os valores `spec.set` do HelmChart substituem as configurações `spec.valuesContent` do HelmChart e do HelmChartConfig.
+:::
+
+Por exemplo, para personalizar a configuração de ingress do Traefik empacotado, você pode criar um arquivo chamado `/var/lib/rancher/k3s/server/manifests/traefik-config.yaml` e preenchê-lo com o seguinte conteúdo:
+
+```yaml
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: traefik
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    image:
+      name: traefik
+      tag: 2.9.10
+    ports:
+      web:
+        forwardedHeaders:
+          trustedIPs:
+            - 10.0.0.0/8
+```
+
+### Migrando do Helm v2
+
+O K3s pode lidar tanto com o Helm v2 quanto com o Helm v3. Se você deseja migrar para o Helm v3, [esta](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) postagem do blog do Helm explica como usar um plugin para fazer a migração com sucesso. Consulte a documentação oficial do Helm 3 [aqui](https://helm.sh/docs/) para obter mais informações. Apenas certifique-se de ter definido corretamente seu kubeconfig, conforme a seção sobre [acesso ao cluster](./cluster-access.md).
+
+:::note
+O Helm 3 não requer mais o Tiller nem o comando `helm init`. Consulte a documentação oficial para obter detalhes.
+:::
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/airgap.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/airgap.md
new file mode 100644
index 000000000..7ae4dd14e
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/airgap.md
@@ -0,0 +1,162 @@
+---
+title: "Instalação em Ambiente Isolado"
+---
+
+O K3s pode ser instalado em um ambiente isolado (air-gapped) de duas maneiras diferentes.
+Você pode implantar imagens através do [artefato de release tarball k3s-airgap-images](#manually-deploy-images-method) ou utilizando um [registro privado](#private-registry-method). Também é possível usar o [espelho de registro integrado](#embedded-registry-mirror), desde que pelo menos um membro do cluster tenha acesso às imagens necessárias.
+
+## Importar Imagens
+
+### Método de Registro Privado
+
+Esses passos assumem que você já criou nós em seu ambiente isolado (air-gap), está utilizando o containerd embutido como runtime de contêiner e tem um registro privado compatível com OCI disponível em seu ambiente.
+
+Se você ainda não configurou um registro Docker privado, consulte a [documentação oficial do Registry](https://distribution.github.io/distribution/about/deploying/#run-an-externally-accessible-registry).
+
+#### Criar o YAML do Registro e Enviar Imagens
+
+1. Obtenha o arquivo de imagens para a sua arquitetura na página de [releases](https://github.com/k3s-io/k3s/releases) para a versão do K3s que você irá utilizar.
+2. Use `docker image load -i k3s-airgap-images-amd64.tar.zst` para importar as imagens do arquivo tar para o Docker.
+3. Use `docker tag` e `docker push` para re-taguear e enviar as imagens carregadas para o seu registro privado.
+4. Siga o guia de [Configuração de Registro Privado](private-registry.md) para criar e configurar o arquivo `registries.yaml`.
+5. Prossiga para a seção [Instalar K3s](#install-k3s) abaixo.
+
+### Método de Implantação Manual de Imagens
+
+Essas etapas pressupõem que você já criou nós em seu ambiente air-gap, está usando o containerd empacotado como runtime de contêiner e não pode ou não quer usar um registro privado.
+
+Este método requer que você implante manualmente as imagens necessárias em cada nó e é apropriado para implantações de borda (edge) onde executar um registro privado não é prático.
+
+#### Preparar o diretório de imagens e o tarball de imagens do airgap
+
+1. Obtenha o arquivo de imagens para sua arquitetura na página de [releases](https://github.com/k3s-io/k3s/releases) da versão do K3s que você executará.
+2. Baixe o arquivo de imagens para o diretório de imagens do agente, por exemplo:
+   ```bash
+   sudo mkdir -p /var/lib/rancher/k3s/agent/images/
+   sudo curl -L -o /var/lib/rancher/k3s/agent/images/k3s-airgap-images-amd64.tar.zst "https://github.com/k3s-io/k3s/releases/download/v1.29.1-rc2%2Bk3s1/k3s-airgap-images-amd64.tar.zst"
+   ```
+3. Prossiga para a seção [Instalar K3s](#install-k3s) abaixo.
+
+### Espelho de Registro Integrado
+
+O K3s inclui um espelho de registro OCI distribuído integrado.
+Quando habilitado e configurado corretamente, as imagens disponíveis no armazenamento de imagens do containerd em qualquer nó podem ser puxadas por outros membros do cluster sem acesso a um registro de imagens externo.
+
+As imagens espelhadas podem ser originadas de um registro upstream, de um espelho de registro ou de um tarball de imagens airgap.
+Para obter mais informações sobre como habilitar o espelho de registro distribuído integrado, consulte a documentação [Espelho de Registro Integrado](./registry-mirror.md).
+
+## Instalação do K3s
+
+### Pré-requisitos
+
+Antes de instalar o K3s, conclua o [Método de Registro Privado](#private-registry-method) ou o [Método de Implantação Manual de Imagens](#manually-deploy-images-method) acima, para pré-carregar as imagens de que o K3s precisa para a instalação.
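+
+Como verificação rápida (um esboço mínimo, assumindo o método de implantação manual de imagens), confirme que o tarball está presente no diretório de imagens do agente antes de iniciar o K3s:
+
+```bash
+# O K3s importa automaticamente, na inicialização, os tarballs presentes neste diretório
+ls -lh /var/lib/rancher/k3s/agent/images/
+```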
+
+#### Binários
+- Baixe o binário do K3s na página de [releases](https://github.com/k3s-io/k3s/releases), correspondendo à mesma versão usada para obter as imagens airgap. Coloque o binário em `/usr/local/bin` em cada nó air-gapped e garanta que ele seja executável.
+- Baixe o script de instalação do K3s em [get.k3s.io](https://get.k3s.io). Coloque o script de instalação em qualquer lugar em cada nó air-gapped e nomeie-o `install.sh`.
+
+#### Rota de Rede Padrão
+Se seus nós não tiverem uma interface com uma rota padrão, uma rota padrão deve ser configurada; até mesmo uma rota black-hole por meio de uma interface dummy será suficiente. O K3s requer uma rota padrão para detectar automaticamente o IP primário do nó e para que o roteamento de ClusterIP do kube-proxy funcione corretamente. Para adicionar uma rota fictícia, faça o seguinte:
+```
+ip link add dummy0 type dummy
+ip link set dummy0 up
+ip addr add 203.0.113.254/31 dev dummy0
+ip route add default via 203.0.113.255 dev dummy0 metric 1000
+```
+
+Ao executar o script de instalação do K3s com a variável de ambiente `INSTALL_K3S_SKIP_DOWNLOAD=true`, o K3s usará a versão local do script e do binário.
+
+#### SELinux RPM
+
+Se você pretende implantar o K3s com o SELinux habilitado, também precisará instalar o RPM k3s-selinux apropriado em todos os nós. A versão mais recente do RPM pode ser encontrada [aqui](https://github.com/k3s-io/k3s-selinux/releases/latest). Por exemplo, no CentOS 8:
+
+```bash
+# Na máquina com acesso à internet:
+curl -LO https://github.com/k3s-io/k3s-selinux/releases/download/v1.4.stable.1/k3s-selinux-1.4-1.el8.noarch.rpm
+
+# Transfira o RPM para a máquina isolada e, então, nela execute:
+sudo yum install ./k3s-selinux-1.4-1.el8.noarch.rpm
+```
+
+Veja a seção [SELinux](../advanced.md#selinux-support) para mais informações.
+
+### Instalando o K3s em um Ambiente Isolado
+
+Você pode instalar o K3s em um ou mais servidores, conforme descrito abaixo.
+
+
+Para instalar o K3s em um único servidor, basta fazer o seguinte no nó do servidor:
+
+```bash
+INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh
+```
+
+Para adicionar mais agentes, faça o seguinte em cada nó de agente:
+
+```bash
+INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://<ip-ou-hostname-do-servidor>:6443 K3S_TOKEN=<token> ./install.sh
+```
+
+:::note
+O token do servidor normalmente é encontrado em `/var/lib/rancher/k3s/server/token`.
+:::
+
+
+Consulte os guias [Alta disponibilidade com um BD externo](../datastore/ha.md) ou [Alta disponibilidade com BD incorporado](../datastore/ha-embedded.md). Você ajustará os comandos de instalação para especificar `INSTALL_K3S_SKIP_DOWNLOAD=true` e executará o script de instalação localmente, em vez de via curl. Você também utilizará `INSTALL_K3S_EXEC='args'` para fornecer quaisquer argumentos ao k3s.
+
+Por exemplo, a segunda etapa do guia Alta disponibilidade com um banco de dados externo menciona o seguinte:
+
+```bash
+curl -sfL https://get.k3s.io | sh -s - server \
+  --token=SECRET \
+  --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name"
+```
+
+Em vez disso, você modificaria esses exemplos da seguinte forma:
+
+```bash
+INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_EXEC='server --token=SECRET' \
+K3S_DATASTORE_ENDPOINT='mysql://username:password@tcp(hostname:3306)/database-name' \
+./install.sh
+```
+
+
+:::note
+A flag `--resolv-conf` do K3s é passada para o kubelet, o que pode ajudar na configuração da resolução de DNS dos pods em redes isoladas (air-gap), onde o host não possui servidores de nomes upstream configurados.
+:::
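+
+Um esboço de uso dessa flag (o servidor DNS `10.0.0.53` e o caminho `/etc/rancher/k3s/resolv.conf` abaixo são hipotéticos; ajuste-os ao seu ambiente):
+
+```bash
+# Cria um resolv.conf dedicado para o kubelet usar na resolução de DNS dos pods
+echo "nameserver 10.0.0.53" | sudo tee /etc/rancher/k3s/resolv.conf
+# Instala o K3s apontando o kubelet para esse arquivo
+INSTALL_K3S_SKIP_DOWNLOAD=true \
+INSTALL_K3S_EXEC='server --resolv-conf /etc/rancher/k3s/resolv.conf' \
+./install.sh
+```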
+
+## Atualização
+
+### Método de Instalação por Script
+
+A atualização de um ambiente isolado pode ser realizada da seguinte maneira:
+
+1. Baixe o novo tarball de imagens air-gap da página de [releases](https://github.com/k3s-io/k3s/releases) para a versão do K3s para a qual você fará o upgrade. Coloque o tarball no diretório `/var/lib/rancher/k3s/agent/images/` em cada nó. Exclua o tarball antigo.
+2. Copie e substitua o binário antigo do K3s em `/usr/local/bin` em cada nó. Baixe novamente o script de instalação de https://get.k3s.io (pois é possível que ele tenha mudado desde o último lançamento). Execute o script novamente, assim como você fez anteriormente, com as mesmas variáveis de ambiente.
+3. Reinicie o serviço K3s (se não for reiniciado automaticamente pelo instalador).
+
+### Método de Atualizações Automatizadas
+
+O K3s suporta [atualizações automatizadas](../upgrades/automated.md). Para habilitá-las em ambientes air-gapped, você deve garantir que as imagens necessárias estejam disponíveis em seu registro privado.
+
+Você precisará da versão do rancher/k3s-upgrade que corresponde à versão do K3s para a qual pretende fazer o upgrade. Observe que a tag da imagem substitui o `+` da versão do K3s por um `-`, porque as imagens do Docker não suportam `+`.
+
+Você também precisará das versões do system-upgrade-controller e do kubectl que são especificadas no manifesto YAML do system-upgrade-controller que você implantará. Verifique a versão mais recente do system-upgrade-controller [aqui](https://github.com/rancher/system-upgrade-controller/releases/latest) e baixe o system-upgrade-controller.yaml para determinar as versões que você precisa enviar para seu registro privado. Por exemplo, na versão v0.4.0 do system-upgrade-controller, estas imagens são especificadas no manifesto YAML:
+
+```
+rancher/system-upgrade-controller:v0.4.0
+rancher/kubectl:v0.17.0
+```
+
+Depois de adicionar as imagens necessárias (rancher/k3s-upgrade, rancher/system-upgrade-controller e rancher/kubectl) ao seu registro privado, siga o guia de [atualizações automatizadas](../upgrades/automated.md).
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/configuration.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/configuration.md
new file mode 100644
index 000000000..fae2dd255
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/configuration.md
@@ -0,0 +1,193 @@
+---
+title: "Opções de Configuração"
+---
+
+Esta página se concentra nas opções comumente usadas ao configurar o K3s pela primeira vez. Consulte a documentação sobre [Opções e Configuração Avançadas](../advanced.md) e a documentação dos comandos [server](../cli/server.md) e [agent](../cli/agent.md) para uma cobertura mais detalhada.
+
+## Configuração com o script de instalação
+
+Conforme mencionado no [Guia de Início Rápido](../quick-start.md), você pode usar o script de instalação disponível em https://get.k3s.io para instalar o K3s como um serviço em sistemas baseados em systemd e openrc.
+
+Você pode usar uma combinação de `INSTALL_K3S_EXEC`, variáveis de ambiente `K3S_` e flags de linha de comando para passar configurações para a configuração do serviço.
+As variáveis de ambiente com prefixo `K3S_`, o valor de `INSTALL_K3S_EXEC` e os argumentos adicionais fornecidos no shell são todos persistidos na configuração do serviço.
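+
+Em uma instalação systemd padrão, é possível inspecionar onde essas opções ficam persistidas (esboço de verificação; os caminhos abaixo são os criados pelo script de instalação para um servidor — em agentes, o nome da unidade é `k3s-agent`):
+
+```bash
+# Comando e argumentos do serviço gerados pelo instalador
+cat /etc/systemd/system/k3s.service
+# Variáveis de ambiente K3S_ persistidas pelo instalador
+cat /etc/systemd/system/k3s.service.env
+```
+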
+Após a instalação, a configuração pode ser alterada editando o arquivo de ambiente, modificando a configuração do serviço ou simplesmente executando novamente o instalador com novas opções.
+
+Para ilustrar isso, os seguintes comandos resultam no mesmo comportamento: registrar um servidor sem o Flannel e com um token:
+
+```bash
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server" sh -s - --flannel-backend none --token 12345
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --flannel-backend none" K3S_TOKEN=12345 sh -s -
+curl -sfL https://get.k3s.io | K3S_TOKEN=12345 sh -s - server --flannel-backend none
+# O servidor é assumido abaixo porque não há K3S_URL
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--flannel-backend none --token 12345" sh -s -
+curl -sfL https://get.k3s.io | sh -s - --flannel-backend none --token 12345
+```
+
+Ao registrar um agente, os seguintes comandos resultam no mesmo comportamento:
+
+```bash
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent --server https://k3s.example.com --token mypassword" sh -s -
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent" K3S_TOKEN="mypassword" sh -s - --server https://k3s.example.com
+curl -sfL https://get.k3s.io | K3S_URL=https://k3s.example.com sh -s - agent --token mypassword
+curl -sfL https://get.k3s.io | K3S_URL=https://k3s.example.com K3S_TOKEN=mypassword sh -s - # O agente é assumido devido à presença de K3S_URL
+```
+
+Para detalhes sobre todas as variáveis de ambiente, consulte [Variáveis de Ambiente](../reference/env-variables.md).
+
+:::info Nota
+Se você definir configurações ao executar o script de instalação, mas não as definir novamente ao reexecutá-lo, os valores originais serão perdidos.
+
+O conteúdo do [arquivo de configuração](#configuration-file) não é gerenciado pelo script de instalação.
+Se você deseja que sua configuração seja independente do script de instalação, deve usar um arquivo de configuração em vez de passar variáveis de ambiente ou argumentos ao script.
+:::
+
+## Configuração com binário
+
+Conforme mencionado, o script de instalação é focado principalmente em configurar o K3s para ser executado como um serviço.
+Se você optar por não usar o script, pode executar o K3s simplesmente baixando o binário da nossa [página de lançamentos](https://github.com/k3s-io/k3s/releases/latest), colocando-o no seu PATH e executando-o. Isso não é particularmente útil para instalações permanentes, mas pode ser prático ao realizar testes rápidos que não justifiquem gerenciar o K3s como um serviço do sistema.
+
+```bash
+curl -Lo /usr/local/bin/k3s https://github.com/k3s-io/k3s/releases/download/v1.26.5+k3s1/k3s; chmod a+x /usr/local/bin/k3s
+```
+
+Você pode passar configurações definindo variáveis de ambiente `K3S_`:
+```bash
+K3S_KUBECONFIG_MODE="644" k3s server
+```
+
+Ou flags de linha de comando:
+```bash
+k3s server --write-kubeconfig-mode=644
+```
+
+O agente K3s também pode ser configurado dessa forma:
+```bash
+k3s agent --server https://k3s.example.com --token mypassword
+```
+
+Para detalhes sobre como configurar o servidor K3s, consulte a [documentação do `k3s server`](../cli/server.md).
+Para detalhes sobre como configurar o agente K3s, consulte a [documentação do `k3s agent`](../cli/agent.md).
+Você também pode usar a flag `--help` para ver uma lista de todas as opções disponíveis e suas respectivas variáveis de ambiente.
+
+:::info Flags Correspondentes
+É importante que flags críticas sejam iguais em todos os nós de servidor.
+Por exemplo, se você usar a flag `--disable servicelb` ou `--cluster-cidr=10.200.0.0/16` em um nó de servidor, mas não a configurar nos demais nós de servidor, esses nós não conseguirão ingressar no cluster. Eles exibirão erros como:
+`failed to validate server configuration: critical configuration value mismatch.`
+
+Consulte a documentação de Configuração do Servidor (referenciada acima) para mais informações sobre quais flags devem ser configuradas de forma idêntica nos nós de servidor.
+:::
+
+## Arquivo de Configuração
+
+:::info Nota de Versão
+Disponível a partir da versão [v1.19.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.19.1%2Bk3s1)
+:::
+
+Além de configurar o K3s com variáveis de ambiente e argumentos de linha de comando, também é possível usar um arquivo de configuração.
+
+Por padrão, os valores presentes no arquivo YAML localizado em `/etc/rancher/k3s/config.yaml` serão utilizados na inicialização.
+
+Abaixo está um exemplo básico de arquivo de configuração para o `server`:
+
+```yaml
+write-kubeconfig-mode: "0644"
+tls-san:
+  - "foo.local"
+node-label:
+  - "foo=bar"
+  - "something=amazing"
+cluster-init: true
+```
+
+Isso é equivalente aos seguintes argumentos de linha de comando:
+
+```bash
+k3s server \
+  --write-kubeconfig-mode "0644" \
+  --tls-san "foo.local" \
+  --node-label "foo=bar" \
+  --node-label "something=amazing" \
+  --cluster-init
+```
+
+De forma geral, os argumentos de linha de comando correspondem às suas respectivas chaves no YAML, com argumentos repetíveis sendo representados como listas no YAML. Flags booleanas são representadas como `true` ou `false` no arquivo YAML.
+
+Também é possível usar tanto um arquivo de configuração quanto argumentos de linha de comando. Nesses casos, os valores serão carregados de ambas as fontes, mas os argumentos de linha de comando terão precedência. Para argumentos repetíveis, como `--node-label`, os argumentos de linha de comando substituirão todos os valores da lista.
+
+Por fim, a localização do arquivo de configuração pode ser alterada usando o argumento de linha de comando `--config FILE, -c FILE` ou a variável de ambiente `$K3S_CONFIG_FILE`.
+
+### Múltiplos Arquivos de Configuração
+
+:::info Nota de Versão
+Disponível a partir da versão [v1.21.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.21.0%2Bk3s1)
+:::
+
+Múltiplos arquivos de configuração são suportados. Por padrão, os arquivos de configuração são lidos de `/etc/rancher/k3s/config.yaml` e `/etc/rancher/k3s/config.yaml.d/*.yaml`, em ordem alfabética.
+
+Por padrão, o último valor encontrado para uma determinada chave será utilizado. Um `+` pode ser adicionado ao final da chave para anexar o valor à string ou lista existente, em vez de substituí-la. Todas as ocorrências dessa chave em arquivos subsequentes também precisarão do `+` para evitar a sobrescrita do valor acumulado.
+ +Abaixo está um exemplo de múltiplos arquivos de configuração: + +```yaml +# config.yaml +token: boop +node-label: + - foo=bar + - bar=baz + + +# config.yaml.d/test1.yaml +write-kubeconfig-mode: 600 +node-taint: + - alice=bob:NoExecute + +# config.yaml.d/test2.yaml +write-kubeconfig-mode: 777 +node-label: + - other=what + - foo=three +node-taint+: + - charlie=delta:NoSchedule + +``` + +Isso resulta em uma configuração final de: + +```yaml +write-kubeconfig-mode: 777 +token: boop +node-label: + - other=what + - foo=three +node-taint: + - alice=bob:NoExecute + - charlie=delta:NoSchedule +``` + +## Colocando tudo junto + +Todas as opções mencionadas acima podem ser combinadas em um único exemplo. + +Um arquivo `config.yaml` é criado em `/etc/rancher/k3s/config.yaml`: + +```yaml +token: "secret" +debug: true +``` + +Em seguida, o script de instalação é executado com uma combinação de variáveis de ambiente e flags: + +```bash +curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" INSTALL_K3S_EXEC="server" sh -s - --flannel-backend none +``` + +Ou, se você já instalou o binário do K3s: +```bash +K3S_KUBECONFIG_MODE="644" k3s server --flannel-backend none +``` + +Isso resulta em um servidor com: +- Um arquivo kubeconfig com permissões `644` +- Backend Flannel definido como `none` +- O token configurado como `secret` +- Log de depuração habilitado diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/installation.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/installation.md new file mode 100644 index 000000000..773faea88 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/installation.md @@ -0,0 +1,19 @@ +--- +title: "Instalação" +--- + +Esta seção contém instruções para instalar o K3s em diversos ambientes. Certifique-se de atender aos [Requisitos](requirements.md) antes de começar a instalação do K3s. + +[Opções de Configuração](configuration.md) fornece orientações sobre as opções disponíveis para você ao instalar o K3s. + +[Configuração de Registro Privado](private-registry.md) aborda o uso do arquivo registries.yaml para configurar espelhos de registros de imagens de contêiner. + +[Espelho Incorporado](registry-mirror.md) mostra como habilitar o espelho distribuído de registro de imagens incorporado. + +[Instalação em Ambiente Isolado](airgap.md) detalha como configurar o K3s em ambientes que não têm acesso direto à Internet. + +[Gerenciando Funções do Servidor](server-roles.md) detalha como configurar o K3s com servidores dedicados para `control-plane` ou `etcd`. + +[Gerenciando Componentes Empacotados](packaged-components.md) detalha como desativar componentes empacotados ou instalar os seus próprios utilizando manifests de implantação automática. + +[Desinstalando o K3s](uninstall.md) detalha como remover o K3s de um host. 
\ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/packaged-components.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/packaged-components.md new file mode 100644 index 000000000..d1afa696b --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/packaged-components.md @@ -0,0 +1,68 @@ +--- +title: "Gerenciando Componentes Empacotados" +--- + +## Implantação Automática de Manifests (AddOns) + +Em nós de servidor, qualquer arquivo encontrado em `/var/lib/rancher/k3s/server/manifests` será automaticamente implantado no Kubernetes de uma maneira similar a `kubectl apply`, tanto na inicialização quanto quando o arquivo for alterado no disco. Excluir arquivos deste diretório não excluirá os recursos correspondentes do cluster. + +Os manifestos são rastreados como recursos personalizados `AddOn` no namespace `kube-system`. Quaisquer erros ou avisos encontrados ao aplicar o arquivo manifesto podem ser vistos usando `kubectl describe` no `AddOn` correspondente ou usando `kubectl get event -n kube-system` para visualizar todos os eventos para esse namespace, incluindo aqueles do controlador de implantação. + +### Componentes Empacotados + +O K3s vem com vários componentes empacotados que são implantados como AddOns por meio do diretório manifests: `coredns`, `traefik`, `local-storage` e `metrics-server`. O controlador LoadBalancer `servicelb` incorporado não tem um arquivo manifest, mas pode ser desabilitado como se fosse um `AddOn` por motivos históricos. + +Manifestos para componentes empacotados são gerenciados pelo K3s e não devem ser alterados. Os arquivos são reescritos no disco sempre que o K3s é iniciado, para garantir sua integridade. + +### AddOns do Usuário + +Você pode colocar arquivos adicionais no diretório manifests para implantação como um `AddOn`. Cada arquivo pode conter vários recursos do Kubernetes, delimitados pelo separador de documentos YAML `---`. Para obter mais informações sobre como organizar recursos em manifests, consulte a seção [Gerenciando recursos](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/) da documentação do Kubernetes. + +#### Requisitos de Nomeação de Arquivos + +O nome `AddOn` para cada arquivo no diretório manifest é derivado do nome base do arquivo. +Certifique-se de que todos os arquivos dentro do diretório manifests (ou dentro de quaisquer subdiretórios) tenham nomes exclusivos e sigam as [restrições de nomenclatura de objetos](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/) do Kubernetes. +Também deve-se tomar cuidado para não entrar em conflito com nomes em uso pelos componentes empacotados padrão do K3s, mesmo que esses componentes estejam desabilitados. + +Aqui está um exemplo de um erro que seria relatado se o nome do arquivo contivesse sublinhados: +> `Failed to process config: failed to process /var/lib/rancher/k3s/server/manifests/example_manifest.yaml: + Addon.k3s.cattle.io "example_manifest" is invalid: metadata.name: Invalid value: "example_manifest": + a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character + (e.g. 
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')`
+
+:::danger
+Se você tiver vários nós de servidor e colocar manifestos AddOn adicionais em mais de um servidor, é sua responsabilidade garantir que os arquivos permaneçam sincronizados entre esses nós. O K3s não sincroniza o conteúdo dos AddOns entre os nós e não pode garantir o comportamento correto se servidores diferentes tentarem implantar manifestos conflitantes.
+:::
+
+## Desabilitando Manifestos
+
+Há duas maneiras de desabilitar a implantação de conteúdo específico do diretório de manifestos.
+
+### Usando a flag `--disable`
+
+Os AddOns dos componentes empacotados listados acima, além dos AddOns de quaisquer manifestos adicionais colocados no diretório `manifests`, podem ser desabilitados com a flag `--disable`. Os AddOns desabilitados são ativamente desinstalados do cluster, e os arquivos de origem são excluídos do diretório `manifests`.
+
+Por exemplo, para impedir que o Traefik seja instalado em um novo cluster, ou para desinstalá-lo e remover o manifesto de um cluster existente, você pode iniciar o K3s com `--disable=traefik`. Vários itens podem ser desabilitados separando seus nomes com vírgulas ou repetindo a flag.
+
+### Usando arquivos .skip
+
+Para qualquer arquivo em `/var/lib/rancher/k3s/server/manifests`, você pode criar um arquivo `.skip` que fará com que o K3s ignore o manifesto correspondente. O conteúdo do arquivo `.skip` não importa; apenas sua existência é verificada. Observe que criar um arquivo `.skip` depois que um AddOn já foi criado não removerá nem modificará o AddOn, nem os recursos que ele criou; o arquivo é simplesmente tratado como se não existisse.
+
+Por exemplo, criar um arquivo `traefik.yaml.skip` vazio no diretório de manifestos antes de o K3s ser iniciado pela primeira vez fará com que o K3s ignore a implantação de `traefik.yaml`:
+```bash
+$ ls /var/lib/rancher/k3s/server/manifests
+ccm.yaml      local-storage.yaml  rolebindings.yaml  traefik.yaml.skip
+coredns.yaml  traefik.yaml
+
+$ kubectl get pods -A
+NAMESPACE     NAME                                     READY   STATUS    RESTARTS   AGE
+kube-system   local-path-provisioner-64ffb68fd-xx98j   1/1     Running   0          74s
+kube-system   metrics-server-5489f84d5d-7zwkt          1/1     Running   0          74s
+kube-system   coredns-85cb69466-vcq7j                  1/1     Running   0          74s
+```
+
+Se o Traefik já tivesse sido implantado antes da criação do arquivo `traefik.yaml.skip`, o Traefik permaneceria como está e não seria afetado por futuras atualizações quando o K3s fosse atualizado.
+
+## Helm AddOns
+
+Para obter informações sobre como gerenciar Helm charts por meio de manifestos de implantação automática, consulte a seção sobre [Helm](../helm.md).
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/private-registry.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/private-registry.md
new file mode 100644
index 000000000..39f1a1ea1
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/private-registry.md
@@ -0,0 +1,267 @@
+---
+title: "Configuração de Registro Privado"
+---
+
+O Containerd pode ser configurado para se conectar a registros privados e usá-los para puxar imagens conforme necessário pelo kubelet.
+
+Ao iniciar, o K3s verificará se o arquivo `/etc/rancher/k3s/registries.yaml` existe. Se existir, a configuração de registros contida nesse arquivo será usada ao gerar a configuração do Containerd.
+* Se você quiser usar um registro privado como espelho de um registro público, como o docker.io, será necessário configurar o `registries.yaml` em cada nó em que você deseja usar o espelho.
+* Se o seu registro privado exigir autenticação, usar certificados TLS personalizados ou não utilizar TLS, será necessário configurar o `registries.yaml` em cada nó que puxará imagens do seu registro.
+
+Observe que os nós de servidor são agendáveis por padrão. Se você não aplicou "taints" nos nós de servidor e pretende executar cargas de trabalho neles, certifique-se de criar o arquivo `registries.yaml` em cada servidor também.
+
+## Fallback de Endpoint Padrão
+
+O Containerd possui um "endpoint padrão" implícito para todos os registros.
+O endpoint padrão sempre será tentado como último recurso, mesmo que existam outros endpoints listados para esse registro no `registries.yaml`.
+Reescritas não são aplicadas a pulls feitos contra o endpoint padrão.
+Por exemplo, ao puxar `registry.example.com:5000/rancher/mirrored-pause:3.6`, o Containerd usará um endpoint padrão de `https://registry.example.com:5000/v2`.
+* O endpoint padrão para `docker.io` é `https://index.docker.io/v2`.
+* O endpoint padrão para todos os outros registros é `https://<registro>/v2`, onde `<registro>` é o nome do host do registro, com porta opcional.
+
+Para ser reconhecido como um registro, o primeiro componente do nome da imagem deve conter pelo menos um ponto ou dois-pontos.
+Por razões históricas, imagens sem um registro especificado em seus nomes são implicitamente identificadas como provenientes de `docker.io`.
+
+:::info Nota de Versão
+A opção `--disable-default-registry-endpoint` está disponível como um recurso experimental a partir das versões de janeiro de 2024: v1.26.13+k3s1, v1.27.10+k3s1, v1.28.6+k3s1, v1.29.1+k3s1.
+:::
+
+Os nós podem ser iniciados com a opção `--disable-default-registry-endpoint`.
+Quando essa opção é configurada, o Containerd não recorrerá ao endpoint padrão do registro e puxará apenas dos endpoints de espelho configurados, juntamente com o registro distribuído, se estiver habilitado.
+
+Isso pode ser desejável se o seu cluster estiver em um ambiente verdadeiramente isolado (air-gapped), onde o registro upstream não está disponível, ou se você desejar que apenas alguns nós puxem do registro upstream.
+
+Desabilitar o endpoint padrão do registro aplica-se apenas aos registros configurados via `registries.yaml`.
+Se o registro não for explicitamente configurado por meio de uma entrada de espelho no `registries.yaml`, o comportamento padrão de fallback ainda será utilizado.
+
+## Arquivo de Configuração de Registros
+
+O arquivo consiste em duas chaves principais, com subchaves para cada registro:
+
+```yaml
+mirrors:
+  <registro>:
+    endpoint:
+      - https://<registro>/v2
+configs:
+  <registro>:
+    auth:
+      username: <usuário da autenticação básica>
+      password: <senha da autenticação básica>
+      token: <token de autenticação>
+    tls:
+      ca_file: <caminho do certificado CA>
+      cert_file: <caminho do certificado do cliente>
+      key_file: <caminho da chave do cliente>
+      insecure_skip_verify: <booleano>
+```
+
+### Mirrors
+
+A seção `mirrors` define os nomes e endpoints dos registros, por exemplo:
+
+```yaml
+mirrors:
+  registry.example.com:
+    endpoint:
+      - "https://registry.example.com:5000"
+```
+
+Cada espelho deve ter um nome e um conjunto de endpoints. Ao puxar uma imagem de um registro, o Containerd tentará esses URLs de endpoint, além do endpoint padrão, e usará o primeiro que funcionar.
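+
+Como ilustração da ordem de tentativa (um esboço mínimo; os hosts `mirror-a.example.com` e `mirror-b.example.com` são hipotéticos):
+
+```yaml
+mirrors:
+  registry.example.com:
+    endpoint:
+      # Tentado primeiro
+      - "https://mirror-a.example.com:5000"
+      # Tentado em seguida, se o anterior falhar
+      - "https://mirror-b.example.com:5000"
+# Por último, o Containerd recorre ao endpoint padrão do próprio registro
+```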
+
+#### Redirecionamentos
+
+Se o registro privado for usado como espelho de outro registro, como ao configurar um [cache de pull-through](https://docs.docker.com/registry/recipes/mirror/), os pulls de imagens são redirecionados de forma transparente para os endpoints listados. O nome do registro original é passado ao endpoint do espelho por meio do parâmetro de consulta `ns`.
+
+Por exemplo, se você tiver um espelho configurado para `docker.io`:
+
+```yaml
+mirrors:
+  docker.io:
+    endpoint:
+      - "https://registry.example.com:5000"
+```
+
+Então, ao puxar `docker.io/rancher/mirrored-pause:3.6`, a imagem será puxada de forma transparente como `registry.example.com:5000/rancher/mirrored-pause:3.6`.
+
+#### Reescritas
+
+Cada espelho pode ter um conjunto de reescritas, que utilizam expressões regulares para corresponder e transformar o nome de uma imagem quando ela é puxada de um espelho.
+Isso é útil se a estrutura de organização/projeto no registro privado for diferente da do registro que está sendo espelhado.
+As reescritas correspondem e transformam apenas o nome da imagem, NÃO a tag.
+
+Por exemplo, a seguinte configuração puxaria de forma transparente a imagem `docker.io/rancher/mirrored-pause:3.6` como `registry.example.com:5000/mirrorproject/rancher-images/mirrored-pause:3.6`:
+
+```yaml
+mirrors:
+  docker.io:
+    endpoint:
+      - "https://registry.example.com:5000"
+    rewrite:
+      "^rancher/(.*)": "mirrorproject/rancher-images/$1"
+```
+
+:::info Nota de Versão
+Reescritas não são mais aplicadas ao [Endpoint Padrão](#default-endpoint-fallback) a partir das versões de janeiro de 2024: v1.26.13+k3s1, v1.27.10+k3s1, v1.28.6+k3s1, v1.29.1+k3s1.
+Antes dessas versões, as reescritas também eram aplicadas ao endpoint padrão, o que impedia o K3s de puxar do registro upstream caso a imagem não pudesse ser puxada de um endpoint de espelho e não estivesse disponível com o nome reescrito no upstream.
+:::
+
+Se você quiser aplicar reescritas ao puxar diretamente de um registro (quando ele não está sendo usado como espelho de um registro upstream diferente), é necessário fornecer um endpoint de espelho que não corresponda ao endpoint padrão.
+Os endpoints de espelho no `registries.yaml` que correspondem ao endpoint padrão são ignorados; o endpoint padrão sempre será tentado por último, sem reescritas, caso o fallback não tenha sido desativado.
+
+Por exemplo, se você tiver um registro em `https://registry.example.com/` e quiser aplicar reescritas ao puxar explicitamente `registry.example.com/rancher/mirrored-pause:3.6`, pode adicionar um endpoint de espelho com a porta especificada.
+Como o endpoint do espelho não corresponde ao endpoint padrão (**`"https://registry.example.com:443/v2" != "https://registry.example.com/v2"`**), o endpoint é aceito como espelho e as reescritas são aplicadas, apesar de ser, efetivamente, o mesmo que o padrão.
+
+```yaml
+mirrors:
+  registry.example.com:
+    endpoint:
+      - "https://registry.example.com:443"
+    rewrite:
+      "^rancher/(.*)": "mirrorproject/rancher-images/$1"
+```
+
+Observe que, ao usar espelhos e reescritas, as imagens ainda serão armazenadas sob o nome original.
+Por exemplo, `crictl image ls` mostrará `docker.io/rancher/mirrored-pause:3.6` como disponível no nó, mesmo que a imagem tenha sido puxada de um espelho com um nome diferente.
+
+### Configurações
+
+A seção `configs` define a configuração de TLS e credenciais para cada espelho. Para cada espelho, você pode definir `auth` e/ou `tls`.
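+
+Por exemplo, um esboço de uma entrada de `configs` combinando os dois blocos (o registro, as credenciais e o caminho abaixo são hipotéticos):
+
+```yaml
+configs:
+  "registry.example.com:5000":
+    auth:
+      username: usuario-do-registro
+      password: senha-do-registro
+    tls:
+      ca_file: /etc/ssl/certs/registry-ca.pem
+```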
+
+A parte `tls` consiste em:
+
+| Diretiva | Descrição |
+| --- | --- |
+| `cert_file` | O caminho do certificado de cliente que será usado para autenticação com o registro. |
+| `key_file` | O caminho da chave de cliente que será usada para autenticação com o registro. |
+| `ca_file` | Define o caminho do certificado CA que será usado para verificar o certificado do servidor do registro. |
+| `insecure_skip_verify` | Booleano que define se a verificação TLS deve ser ignorada para o registro. |
+
+A parte `auth` consiste em nome de usuário/senha ou token de autenticação:
+
+| Diretiva | Descrição |
+| --- | --- |
+| `username` | nome de usuário para a autenticação básica do registro privado |
+| `password` | senha do usuário para a autenticação básica do registro privado |
+| `auth` | token de autenticação para a autenticação básica do registro privado |
+
+Abaixo estão exemplos básicos de uso de registros privados em diferentes modos:
+
+### Suporte a Curingas
+
+:::info Nota de Versão
+O suporte a curingas está disponível a partir das versões de março de 2024: v1.26.15+k3s1, v1.27.12+k3s1, v1.28.8+k3s1, v1.29.3+k3s1
+:::
+
+A entrada curinga `"*"` pode ser usada nas seções `mirrors` e `configs` para fornecer uma configuração padrão para todos os registros.
+A configuração padrão será usada apenas se não houver uma entrada específica para o registro. Observe que o asterisco DEVE estar entre aspas.
+
+No exemplo a seguir, um espelho de registro local será usado para todos os registros. A verificação TLS será desativada para todos os registros, exceto `docker.io`.
+
+```yaml
+mirrors:
+  "*":
+    endpoint:
+      - "https://registry.example.com:5000"
+configs:
+  "docker.io":
+  "*":
+    tls:
+      insecure_skip_verify: true
+```
+
+### Com TLS
+
+Abaixo estão exemplos que mostram como configurar `/etc/rancher/k3s/registries.yaml` em cada nó ao usar TLS.
+
+
+```yaml
+mirrors:
+  docker.io:
+    endpoint:
+      - "https://registry.example.com:5000"
+configs:
+  "registry.example.com:5000":
+    auth:
+      username: xxxxxx # este é o nome de usuário do registro
+      password: xxxxxx # esta é a senha do usuário do registro
+    tls:
+      cert_file: # caminho para o arquivo do certificado usado no registro
+      key_file:  # caminho para o arquivo da chave usada no registro
+      ca_file:   # caminho para o arquivo do CA usado no registro
+```
+
+
+```yaml
+mirrors:
+  docker.io:
+    endpoint:
+      - "https://registry.example.com:5000"
+configs:
+  "registry.example.com:5000":
+    tls:
+      cert_file: # caminho para o arquivo do certificado usado no registro
+      key_file:  # caminho para o arquivo da chave usada no registro
+      ca_file:   # caminho para o arquivo do CA usado no registro
+```
+
+
+### Sem TLS
+
+Abaixo estão exemplos que mostram como configurar `/etc/rancher/k3s/registries.yaml` em cada nó quando _não_ estiver usando TLS.
+
+
+```yaml
+mirrors:
+  docker.io:
+    endpoint:
+      - "http://registry.example.com:5000"
+configs:
+  "registry.example.com:5000":
+    auth:
+      username: xxxxxx # este é o nome de usuário do registro
+      password: xxxxxx # esta é a senha do usuário do registro
+```
+
+
+```yaml
+mirrors:
+  docker.io:
+    endpoint:
+      - "http://registry.example.com:5000"
+```
+
+ +Para que as alterações no registro entrem em vigor, é necessário reiniciar o K3s em cada nó. + +## Solução de Problemas com Pull de Imagens + +Quando o Kubernetes encontra problemas ao puxar uma imagem, o erro exibido pelo kubelet pode refletir apenas o erro final retornado pela tentativa de pull feita no endpoint padrão, fazendo parecer que os endpoints configurados não estão sendo utilizados. + +Verifique o log do Containerd no nó em `/var/lib/rancher/k3s/agent/containerd/containerd.log` para obter informações detalhadas sobre a causa raiz da falha. + +## Adicionando Imagens ao Registro Privado + +Espelhar imagens para um registro privado requer um host com Docker ou outras ferramentas de terceiros capazes de puxar e enviar imagens. +Os passos abaixo presumem que você possui um host com o dockerd, ferramentas de linha de comando do Docker e acesso tanto ao docker.io quanto ao seu registro privado. + +1. Obtenha o arquivo `k3s-images.txt` do GitHub para a versão com a qual você está trabalhando. +2. Puxe cada uma das imagens do K3s listadas no arquivo k3s-images.txt do docker.io. + Exemplo: `docker pull docker.io/rancher/mirrored-pause:3.6` +3. Retague as imagens para o registro privado. + Exemplo: `docker tag docker.io/rancher/mirrored-pause:3.6 registry.example.com:5000/rancher/mirrored-pause:3.6` +4. Envie as imagens para o registro privado. + Exemplo: `docker push registry.example.com:5000/rancher/mirrored-pause:3.6` diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/registry-mirror.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/registry-mirror.md new file mode 100644 index 000000000..7aac5f6a7 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/registry-mirror.md @@ -0,0 +1,132 @@ +--- +title: "Espelho de Registro Integrado" +--- + +:::info Nota de Versão +O Espelho de Registro Integrado está disponível como um recurso experimental a partir das versões de janeiro de 2024: v1.26.13+k3s1, v1.27.10+k3s1, v1.28.6+k3s1, v1.29.1+k3s1, e como recurso estável (GA) a partir das versões de dezembro de 2024: v1.29.12+k3s1, v1.30.8+k3s1, v1.31.4+k3s1. +::: + + +O K3s incorpora o [Spegel](https://github.com/spegel-org/spegel), um espelho de registro OCI distribuído e sem estado, que permite o compartilhamento peer-to-peer de imagens de contêiner entre os nós em um cluster Kubernetes. O espelho de registro distribuído está desativado por padrão. Para que o K3s o utilize, você deve ativar tanto o [Espelho de Registro OCI Distribuído](#enabling-the-distributed-oci-registry-mirror) quanto o [Espelhamento de Registro](#enabling-registry-mirroring), conforme explicado nas subseções a seguir. + +## Ativando o Espelho de Registro OCI Distribuído + +Para ativar o espelho de registro integrado, os nós do servidor devem ser iniciados com a flag `--embedded-registry` ou com `embedded-registry: true` no arquivo de configuração. +Essa opção habilita o espelho integrado para uso em todos os nós do cluster. + +Quando ativado em nível de cluster, todos os nós hospedarão um registro OCI local na porta 6443 e publicarão uma lista de imagens disponíveis por meio de uma rede peer-to-peer na porta 5001. +Qualquer imagem disponível no armazenamento de imagens do Containerd em qualquer nó pode ser puxada por outros membros do cluster sem necessidade de acesso a um registro externo. 
+Imagens importadas via arquivos [tar de imagens para ambiente isolado](./airgap.md#manually-deploy-images-method) são fixadas no Containerd para garantir que permaneçam disponíveis e não sejam removidas pela coleta de lixo do Kubelet. + +A porta peer-to-peer pode ser alterada de 5001 configurando a variável de ambiente `K3S_P2P_PORT` para o serviço K3s. A porta deve ser configurada com o mesmo valor em todos os nós. +Alterar a porta não é suportado e não é recomendável. + +### Requisitos + +Quando o espelho de registro integrado está habilitado, todos os nós devem ser capazes de se comunicar entre si por meio de seus endereços IP internos, nas portas TCP 5001 e 6443. +Se os nós não puderem se comunicar, pode demorar mais para as imagens serem puxadas, já que o registro distribuído será tentado primeiro pelo Containerd, antes de recorrer a outros endpoints. + +## Ativando o Espelhamento de Registro + +Ativar o espelhamento para um registro permite que um nó tanto puxe imagens desse registro de outros nós quanto compartilhe as imagens do registro com outros nós. +Se um registro for ativado para espelhamento em alguns nós, mas não em outros, apenas os nós com o espelhamento ativado trocarão imagens desse registro. + +Para ativar o espelhamento de imagens de um registro de contêiner upstream, os nós devem ter uma entrada na seção `mirrors` do arquivo `registries.yaml` para esse registro. +O registro não precisa ter nenhum endpoint listado, apenas precisa estar presente. +Por exemplo, para habilitar o espelhamento distribuído de imagens de `docker.io` e `registry.k8s.io`, configure o `registries.yaml` com o seguinte conteúdo em todos os nós do cluster: + +```yaml +mirrors: + docker.io: + registry.k8s.io: +``` + +Endpoints para espelhos de registros também podem ser adicionados como de costume. +Na configuração a seguir, as tentativas de pull de imagens tentarão primeiro o espelho integrado, depois `mirror.example.com` e, finalmente, `docker.io`: +```yaml +mirrors: + docker.io: + endpoint: + - https://mirror.example.com +``` + +Se você estiver usando um registro privado diretamente, em vez de como espelho de um registro upstream, pode habilitar o espelhamento distribuído da mesma forma que os registros públicos são habilitados — listando-o na seção de espelhos: +```yaml +mirrors: + mirror.example.com: +``` + +:::info Nota de Versão +O suporte a curingas está disponível a partir das versões de março de 2024: v1.26.15+k3s1, v1.27.12+k3s1, v1.28.8+k3s1, v1.29.3+k3s1. +::: + +A entrada de espelho com curinga `"*"` pode ser usada para habilitar o espelhamento distribuído de todos os registros. Observe que o asterisco DEVE estar entre aspas: +```yaml +mirrors: + "*": +``` + +Se nenhum registro estiver habilitado para espelhamento em um nó, esse nó não participará do registro distribuído de nenhuma forma. + +Para mais informações sobre a estrutura do arquivo `registries.yaml`, consulte [Configuração de Registro Privado](./private-registry.md). + +### Fallback de Endpoint Padrão + +Por padrão, o containerd recorrerá ao endpoint padrão ao puxar de registros com endpoints de espelho configurados. Se você quiser desabilitar isso e puxar imagens apenas dos espelhos configurados e/ou do espelho integrado, consulte a seção [Fallback de Endpoint Padrão](./private-registry.md#default-endpoint-fallback) da documentação de Configuração de Registro Privado. 
+
+Observe que, se você estiver usando a opção `--disable-default-registry-endpoint` e quiser permitir pulls diretamente de um registro específico, enquanto os desabilita para os demais, você pode fornecer explicitamente um endpoint para permitir que o pull da imagem recorra ao próprio registro:
+```yaml
+mirrors:
+  docker.io: # Sem o endpoint padrão, os pulls falharão se a imagem não estiver disponível em um nó.
+  registry.k8s.io: # Sem o endpoint padrão, os pulls falharão se a imagem não estiver disponível em um nó.
+  mirror.example.com: # Com um endpoint padrão explícito, é possível puxar do registro upstream se a imagem não estiver disponível em um nó.
+    endpoint:
+      - https://mirror.example.com
+```
+
+### Tag Latest
+
+Quando nenhuma tag é especificada para uma imagem de contêiner, a tag padrão implícita é `latest`. Essa tag é frequentemente atualizada para apontar para a versão mais recente da imagem. Como essa tag pode apontar para diferentes revisões de uma imagem dependendo de quando é puxada, o registro distribuído **não puxará** a tag `latest` de outros nós. Isso força o containerd a acessar um registro upstream ou um espelho de registro, garantindo uma visão consistente daquilo a que a tag `latest` se refere.
+
+Isso está alinhado com a [política especial de `imagePullPolicy`](https://kubernetes.io/docs/concepts/containers/images/#imagepullpolicy-defaulting) observada pelo Kubernetes ao usar a tag `latest` de uma imagem de contêiner.
+
+O espelhamento da tag `latest` pode ser ativado configurando a variável de ambiente `K3S_P2P_ENABLE_LATEST=true` para o serviço K3s.
+Isso não é suportado nem recomendado, pelos motivos discutidos acima.
+
+## Segurança
+
+### Autenticação
+
+O acesso à API de registro do espelho integrado requer um certificado de cliente válido, assinado pela autoridade certificadora de cliente do cluster.
+
+O acesso à rede peer-to-peer da tabela de hash distribuída requer uma chave pré-compartilhada que é controlada pelos nós de servidor.
+Os nós se autenticam mutuamente usando tanto a chave pré-compartilhada quanto um certificado assinado pela autoridade certificadora do cluster.
+
+### Preocupações Potenciais
+
+:::warning
+O registro distribuído é construído com base em princípios peer-to-peer e assume um nível igual de privilégio e confiança entre todos os membros do cluster.
+Se isso não corresponder à postura de segurança do seu cluster, você não deve habilitar o registro distribuído integrado.
+:::
+
+O registro integrado pode disponibilizar imagens às quais um nó não teria acesso de outra forma.
+Por exemplo, se algumas das suas imagens forem puxadas de um registro, projeto ou repositório que exija autenticação via Image Pull Secrets do Kubernetes, ou credenciais no `registries.yaml`, o registro distribuído permitirá que outros nós compartilhem essas imagens sem fornecer credenciais ao registro upstream.
+
+Usuários com acesso para enviar imagens ao armazenamento de imagens do containerd em um nó podem conseguir usar isso para "contaminar" a imagem para outros nós do cluster, já que os demais nós confiarão na tag anunciada pelo nó e a usarão sem verificar com o registro upstream.
+Se a integridade da imagem for importante, você deve usar digests de imagem em vez de tags, pois o digest não pode ser contaminado dessa maneira.
+
+## Compartilhando Imagens de Ambiente Isolado ou Carregadas Manualmente
+
+O compartilhamento de imagens é controlado com base no registro de origem.
+Imagens carregadas diretamente no containerd por meio de arquivos tar de ambiente isolado (air-gap), ou importadas diretamente no armazenamento de imagens do containerd com a ferramenta de linha de comando `ctr`, serão compartilhadas entre os nós se estiverem marcadas como provenientes de um registro habilitado para espelhamento.
+
+Observe que o registro upstream do qual as imagens parecem vir não precisa, na verdade, existir ou ser acessível.
+Por exemplo, você pode marcar imagens como se fossem de um registro upstream fictício e importá-las para o armazenamento de imagens do containerd.
+Você então poderá puxar essas imagens de todos os membros do cluster, contanto que esse registro esteja listado no `registries.yaml`.
+
+## Enviando Imagens
+
+O registro integrado é somente leitura: não é possível enviar imagens para ele diretamente com `docker push` ou outras ferramentas comuns que interagem com registros OCI.
+
+As imagens podem ser disponibilizadas manualmente por meio do registro integrado executando `ctr -n k8s.io image pull` para puxar uma imagem, ou carregando arquivos de imagem criados com `docker save` por meio do comando `ctr -n k8s.io image import`.
+Observe que o namespace `k8s.io` deve ser especificado ao gerenciar imagens via `ctr` para que elas fiquem visíveis ao kubelet.
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/requirements.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/requirements.md
new file mode 100644
index 000000000..3ad39da95
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/requirements.md
@@ -0,0 +1,233 @@
+---
+title: Requisitos
+---
+
+O K3s é muito leve, mas possui alguns requisitos mínimos, conforme descrito abaixo.
+
+Seja configurando o K3s para ser executado em um contêiner ou como um serviço nativo do Linux, cada nó que executa o K3s deve atender aos seguintes requisitos mínimos. Esses requisitos são a base para o K3s e seus componentes empacotados, e não incluem os recursos consumidos pelas cargas de trabalho em si.
+
+## Pré-requisitos
+
+Dois nós não podem ter o mesmo hostname.
+
+Se vários nós tiverem o mesmo hostname, ou se hostnames puderem ser reutilizados por um sistema de provisionamento automatizado, use a opção `--with-node-id` para adicionar um sufixo aleatório a cada nó. Alternativamente, crie um nome exclusivo para cada nó e passe-o com `--node-name` ou `$K3S_NODE_NAME` ao adicioná-lo ao cluster.
+
+## Arquitetura
+
+O K3s está disponível para as seguintes arquiteturas:
+- x86_64
+- armhf
+- arm64/aarch64
+- s390x
+
+:::warning Tamanho de Página em ARM64
+
+Antes dos lançamentos de maio de 2023 (v1.24.14+k3s1, v1.25.10+k3s1, v1.26.5+k3s1, v1.27.2+k3s1), em sistemas `aarch64/arm64`, o kernel deve usar páginas de 4k. **RHEL9**, **Ubuntu**, **Raspberry Pi OS** e **SLES** atendem a esse requisito.
+
+:::
+
+## Sistemas Operacionais
+
+Espera-se que o K3s funcione na maioria dos sistemas Linux modernos.
+
+Alguns sistemas operacionais possuem requisitos adicionais de configuração:
+
+
+Recomenda-se desativar o firewalld:
+```bash
+systemctl disable firewalld --now
+```
+
+Se você deseja manter o firewalld habilitado, as seguintes regras são necessárias por padrão:
+```bash
+firewall-cmd --permanent --add-port=6443/tcp #apiserver
+firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 #pods
+firewall-cmd --permanent --zone=trusted --add-source=10.43.0.0/16 #services
+firewall-cmd --reload
+```
+
+Portas adicionais podem precisar ser abertas, dependendo da sua configuração. Consulte [Regras de Entrada](#inbound-rules-for-k3s-nodes) para mais informações. Se você alterar o CIDR padrão de pods ou serviços, será necessário atualizar as regras do firewall de acordo.
+
+Se o nm-cloud-setup estiver habilitado, é necessário desativá-lo e reiniciar o nó:
+```bash
+systemctl disable nm-cloud-setup.service nm-cloud-setup.timer
+reboot
+```
+
+
+Versões mais antigas do Debian podem sofrer com um bug conhecido no iptables. Consulte [Problemas Conhecidos](../known-issues.md#iptables).
+
+É recomendável desativar o ufw (Uncomplicated Firewall):
+```bash
+ufw disable
+```
+
+Se você deseja manter o ufw habilitado, as seguintes regras são necessárias por padrão:
+```bash
+ufw allow 6443/tcp #apiserver
+ufw allow from 10.42.0.0/16 to any #pods
+ufw allow from 10.43.0.0/16 to any #services
+```
+
+Portas adicionais podem precisar ser abertas, dependendo da sua configuração. Consulte [Regras de Entrada](#inbound-rules-for-k3s-nodes) para mais informações. Se você alterar o CIDR padrão de pods ou serviços, será necessário atualizar as regras do firewall de acordo.
+
+
+O Raspberry Pi OS é baseado no Debian e pode ser afetado por um bug conhecido no iptables. Consulte [Problemas Conhecidos](../known-issues.md#iptables).
+
+#### Cgroups
+
+Instalações padrão do Raspberry Pi OS não iniciam com os `cgroups` habilitados. O K3s precisa de `cgroups` para iniciar o serviço systemd. Os `cgroups` podem ser habilitados adicionando `cgroup_memory=1 cgroup_enable=memory` ao arquivo `/boot/firmware/cmdline.txt`.
+**Nota:** no Debian 11 e em versões mais antigas do Raspberry Pi OS, o arquivo cmdline.txt está localizado em `/boot/cmdline.txt`.
+
+Exemplo de cmdline.txt:
+```
+console=serial0,115200 console=tty1 root=PARTUUID=58b06195-02 rootfstype=ext4 elevator=deadline fsck.repair=yes rootwait cgroup_memory=1 cgroup_enable=memory
+```
+
+#### Módulo VXLAN no Ubuntu
+Do Ubuntu 21.10 ao Ubuntu 23.10, o suporte a VXLAN no Raspberry Pi foi movido para um módulo de kernel separado. Esse passo não é necessário no Ubuntu 24.04 e em versões posteriores.
+```bash +sudo apt install linux-modules-extra-raspi +``` + + + +Para mais informações sobre quais sistemas operacionais foram testados com clusters K3s gerenciados pelo Rancher, consulte os [termos de suporte e manutenção do Rancher.](https://rancher.com/support-maintenance-terms/) + +## Hardware + +Os requisitos de hardware escalam com base no tamanho das suas implantações. Os requisitos mínimos são: + +| Node | CPU | RAM | +| ------ | ------- | ------ | +| Server | 2 cores | 2 GB | +| Agent | 1 core | 512 MB | + +O [Perfil de Recursos](../reference/resource-profiling.md) captura os resultados de testes e análises para determinar os requisitos mínimos de recursos para o agente K3s, o servidor K3s com uma carga de trabalho e o servidor K3s com um agente. + +### Disco + +O desempenho do K3s depende do desempenho do banco de dados. Para garantir uma velocidade ideal, recomendamos o uso de um SSD sempre que possível. + +Se estiver implantando o K3s em um Raspberry Pi ou outros dispositivos ARM, é recomendável usar um SSD externo. O etcd é intensivo em gravações; cartões SD e eMMC não conseguem lidar com a carga de E/S. + +### Guia de Dimensionamento de Servidor + +Quando há limitações de CPU e RAM no nó do servidor (plano de controle + etcd), existem restrições sobre a quantidade de nós de agente que podem ser adicionados sob condições padrão de carga de trabalho. + +| Server CPU | Server RAM | Número de Agentes | +| ---------- | ---------- | ----------------- | +| 2 | 4 GB | 0-350 | +| 4 | 8 GB | 351-900 | +| 8 | 16 GB | 901-1800 | +| 16+ | 32 GB | 1800+ | + +:::tip Dimensionamento para Alta Disponibilidade + +Em uma configuração de alta disponibilidade com 3 nós de servidor, o número de agentes pode escalar aproximadamente 50% a mais do que o descrito na tabela anterior. + +Ex: 3 servidores com 4 vCPUs e 8 GB de RAM. Essa configuração pode suportar aproximadamente 1200 agentes. +::: + +Recomenda-se adicionar os nós de agente em lotes de 50 ou menos para permitir que a CPU libere recursos, já que ocorre um pico de uso durante a adição de nós. Lembre-se de modificar o valor padrão de `cluster-cidr` se planejar mais de 255 nós no cluster! Isso garantirá espaço de endereçamento IP suficiente para acomodar todos os nós no ambiente. + +O [Perfil de Recursos](../reference/resource-profiling.md#server-sizing-requirements-for-k3s) contém mais informações sobre como essas recomendações foram determinadas. + + +## Rede + +O servidor K3s precisa que a porta 6443 esteja acessível por todos os nós. + +Os nós precisam ser capazes de se comunicar entre si pela porta UDP 8472 ao usar o backend Flannel VXLAN, ou pela porta UDP 51820 (e 51821 se IPv6 for utilizado) ao usar o backend Flannel WireGuard. O nó não deve escutar em nenhuma outra porta. O K3s utiliza tunelamento reverso, de forma que os nós fazem conexões de saída para o servidor, e todo o tráfego do kubelet passa por esse túnel. No entanto, se você não utilizar o Flannel e fornecer sua própria CNI personalizada, as portas necessárias pelo Flannel não serão necessárias para o K3s. + +Se você deseja utilizar o servidor de métricas, todos os nós devem estar acessíveis entre si pela porta 10250. + +Se você planeja alcançar alta disponibilidade com o etcd embutido, os nós do servidor devem estar acessíveis entre si pelas portas 2379 e 2380. + +:::tip Importante +A porta VXLAN nos nós não deve ser exposta para o mundo, pois isso abre a rede do seu cluster para ser acessada por qualquer pessoa. 
Execute seus nós por trás de um firewall ou grupo de segurança que desabilite o acesso à porta 8472.
+:::
+
+:::danger
+O Flannel depende do [plugin Bridge CNI](https://www.cni.dev/plugins/current/main/bridge/) para criar uma rede L2 que encaminha o tráfego. Pods maliciosos com capacidades `NET_RAW` podem abusar dessa rede L2 para lançar ataques, como [spoofing de ARP](https://static.sched.com/hosted_files/kccncna19/72/ARP%20DNS%20spoof.pdf). Portanto, conforme documentado na [documentação do Kubernetes](https://kubernetes.io/docs/concepts/security/pod-security-standards/), configure um perfil restrito que desabilite `NET_RAW` em pods não confiáveis.
+:::
+
+### Regras de Entrada para os Nós K3s
+
+| Protocolo | Porta     | Fonte        | Destino      | Descrição                                                      |
+| --------- | --------- | ------------ | ------------ | -------------------------------------------------------------- |
+| TCP       | 2379-2380 | Servidores   | Servidores   | Necessário apenas para HA com etcd embutido                    |
+| TCP       | 6443      | Agentes      | Servidores   | Supervisor do K3s e Servidor de API do Kubernetes              |
+| UDP       | 8472      | Todos os nós | Todos os nós | Necessário apenas para Flannel VXLAN                           |
+| TCP       | 10250     | Todos os nós | Todos os nós | Métricas do Kubelet                                            |
+| UDP       | 51820     | Todos os nós | Todos os nós | Necessário apenas para Flannel Wireguard com IPv4              |
+| UDP       | 51821     | Todos os nós | Todos os nós | Necessário apenas para Flannel Wireguard com IPv6              |
+| TCP       | 5001      | Todos os nós | Todos os nós | Necessário apenas para registro distribuído embutido (Spegel)  |
+| TCP       | 6443      | Todos os nós | Todos os nós | Necessário apenas para registro distribuído embutido (Spegel)  |
+
+Normalmente, todo o tráfego de saída é permitido.
+
+Alterações adicionais no firewall podem ser necessárias dependendo do sistema operacional utilizado.
+
+## Clusters Grandes
+
+Os requisitos de hardware dependem do tamanho do seu cluster K3s. Para produção e clusters grandes, recomendamos o uso de uma configuração de alta disponibilidade com um banco de dados externo. As seguintes opções são recomendadas para o banco de dados externo em produção:
+
+- MySQL
+- PostgreSQL
+- etcd
+
+### CPU e Memória
+
+A seguir estão os requisitos mínimos de CPU e memória para os nós em um servidor K3s de alta disponibilidade:
+
+| Tamanho da Implantação | Nós       | vCPUs | RAM   |
+| :--------------------: | :-------: | :---: | :---: |
+| Pequeno                | Até 10    | 2     | 4 GB  |
+| Médio                  | Até 100   | 4     | 8 GB  |
+| Grande                 | Até 250   | 8     | 16 GB |
+| X-Grande               | Até 500   | 16    | 32 GB |
+| XX-Grande              | 500+      | 32    | 64 GB |
+
+### Discos
+
+O desempenho do cluster depende do desempenho do banco de dados. Para garantir velocidade ideal, recomendamos sempre usar discos SSD para suportar seu cluster K3s. Em provedores de nuvem, também é recomendável usar o tamanho mínimo que permita o máximo de IOPS.
+
+### Rede
+
+Deve-se considerar o aumento do tamanho da sub-rede para o CIDR do cluster para evitar a falta de IPs para os pods. Isso pode ser feito passando a opção `--cluster-cidr` ao servidor K3s durante a inicialização.
+
+### Banco de Dados
+
+O K3s suporta diferentes bancos de dados, incluindo MySQL, PostgreSQL, MariaDB e etcd. Consulte [Cluster Datastore](../datastore/datastore.md) para mais informações. 
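+
+A título de ilustração, um esboço mínimo de como apontar o servidor K3s para um datastore externo usando o sinalizador `--datastore-endpoint` (o host `mysql.example.com`, o usuário, a senha e o nome do banco são valores hipotéticos; ajuste para o seu ambiente):
+
+```bash
+# Esboço: instala um servidor K3s usando um MySQL externo como datastore.
+# Todos os valores de conexão abaixo são fictícios.
+curl -sfL https://get.k3s.io | sh -s - server \
+  --datastore-endpoint="mysql://usuario:senha@tcp(mysql.example.com:3306)/k3s"
+```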
+
+A seguir está um guia de dimensionamento para os recursos de banco de dados necessários para executar clusters grandes:
+
+| Tamanho da Implantação | Nós       | vCPUs | RAM   |
+| :--------------------: | :-------: | :---: | :---: |
+| Pequeno                | Até 10    | 1     | 2 GB  |
+| Médio                  | Até 100   | 2     | 8 GB  |
+| Grande                 | Até 250   | 4     | 16 GB |
+| X-Grande               | Até 500   | 8     | 32 GB |
+| XX-Grande              | 500+      | 16    | 64 GB |
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/server-roles.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/server-roles.md
new file mode 100644
index 000000000..8a9a3357e
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/server-roles.md
@@ -0,0 +1,50 @@
+---
+title: "Gerenciando Funções do Servidor"
+---
+
+Iniciar o servidor K3s com `--cluster-init` executará todos os componentes do plano de controle, incluindo o apiserver, o controller-manager, o scheduler e o etcd. É possível desabilitar componentes específicos para dividir as funções do plano de controle e do etcd em nós separados.
+
+:::info
+Este documento só é relevante ao usar etcd incorporado. Quando não estiver usando etcd incorporado, todos os servidores terão a função control-plane e executarão componentes control-plane.
+:::
+
+## Nós `etcd` Dedicados
+Para criar um servidor apenas com a função `etcd`, inicie o K3s com todos os componentes do plano de controle desabilitados:
+```bash
+curl -fL https://get.k3s.io | sh -s - server --cluster-init --disable-apiserver --disable-controller-manager --disable-scheduler
+```
+
+Este primeiro nó iniciará o etcd e aguardará que nós adicionais `etcd` e/ou `control-plane` se juntem. O cluster não poderá ser usado até que você junte um servidor adicional com os componentes `control-plane` habilitados.
+
+## Nós `control-plane` Dedicados
+:::note
+Um nó `control-plane` dedicado não pode ser o primeiro servidor no cluster; deve haver um nó existente com a função `etcd` antes de unir nós `control-plane` dedicados.
+:::
+
+Para criar um servidor apenas com a função `control-plane`, inicie o k3s com o etcd desabilitado:
+```bash
+curl -fL https://get.k3s.io | sh -s - server --token <token> --disable-etcd --server https://<ip-do-servidor-etcd>:6443
+```
+
+Após criar nós de servidor dedicados, as funções selecionadas ficarão visíveis em `kubectl get node`:
+```bash
+$ kubectl get nodes
+NAME           STATUS   ROLES                  AGE     VERSION
+k3s-server-1   Ready    etcd                   5h39m   v1.20.4+k3s1
+k3s-server-2   Ready    control-plane,master   5h39m   v1.20.4+k3s1
+```
+
+## Adicionando Funções a Servidores Existentes
+
+As funções podem ser adicionadas a nós dedicados existentes reiniciando o K3s com os sinalizadores de desabilitação removidos. Por exemplo, se você quiser adicionar a função `control-plane` a um nó `etcd` dedicado, você pode remover os sinalizadores `--disable-apiserver --disable-controller-manager --disable-scheduler` da unidade systemd ou do arquivo de configuração e reiniciar o serviço.
+
+## Sintaxe do Arquivo de Configuração
+
+Assim como com todos os outros sinalizadores CLI, você pode usar o [Arquivo de configuração](configuration.md#configuration-file) para desabilitar componentes, em vez de passar as opções como sinalizadores CLI. 
Por exemplo, para criar um nó `etcd` dedicado, você pode colocar os seguintes valores em `/etc/rancher/k3s/config.yaml`: + +```yaml +cluster-init: true +disable-apiserver: true +disable-controller-manager: true +disable-scheduler: true +``` diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/uninstall.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/uninstall.md new file mode 100644 index 000000000..138797d30 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/installation/uninstall.md @@ -0,0 +1,29 @@ +--- +title: Desinstalando o K3s +--- + +:::warning +Desinstalar o K3s pode causar perda de dados! +::: + +Se você instalou o K3s usando o script de instalação, um script para desinstalar o K3s foi gerado durante a instalação. + +Executar o script de desinstalação interrompe o K3s e todos os pods em execução, além de excluir o banco de dados do cluster local, os dados de volumes persistentes do [armazenamento local](../storage.md#setting-up-the-local-storage-provider), a configuração do nó e todos os scripts e ferramentas de linha de comando. + +Ele não remove nenhum dado de bancos de dados externos ou criado por pods que utilizam volumes persistentes externos do Kubernetes. + +Se você planeja reconectar um nó a um cluster existente após desinstalar e reinstalar, certifique-se de excluir o nó do cluster para garantir que o segredo de senha do nó seja removido. Consulte a documentação sobre [Registro de Nós](../architecture.md#how-agent-node-registration-works) para mais informações. + +### Desinstalando Servidores +Para desinstalar o K3s de um nó de servidor, execute: + +```bash +/usr/local/bin/k3s-uninstall.sh +``` + +### Desinstalando Agentes +Para desinstalar o K3s de um nó de agente, execute: + +```bash +/usr/local/bin/k3s-agent-uninstall.sh +``` diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/introduction.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/introduction.md new file mode 100644 index 000000000..98b270e8c --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/introduction.md @@ -0,0 +1,43 @@ +--- +slug: / +title: "K3s - Kubernetes Leve" +--- + +Kubernetes Leve. Fácil de instalar, metade da memória, tudo em um binário de menos de 100MB. + +Ideal para: + +* Edge (Computação de borda) +* Homelab (Hospedagem doméstica) +* Internet das Coisas (IoT) +* Integração Contínua (CI) +* Desenvolvimento +* Computador em placa única (ARM) +* Ambientes isolados (air-gapped) +* Kubernetes embarcado (Embedded K8s) +* Situações em que um doutorado em "clusterologia" de K8s é inviável + +# O que é K3s? + +K3s é uma distribuição Kubernetes totalmente compatível, com os seguintes aprimoramentos: + + * Distribuído como um único binário ou imagem de contêiner mínima. + * Datastore leve baseado em sqlite3 como backend de armazenamento padrão. etcd3, MySQL e Postgres também estão disponíveis. + * Envolvido em um iniciador simples que gerencia grande parte da complexidade relacionada a TLS e opções. + * Seguro por padrão, com configurações razoáveis para ambientes leves. + * A operação de todos os componentes do plano de controle do Kubernetes é encapsulada em um único binário e processo, permitindo que o K3s automatize e gerencie operações complexas do cluster, como a distribuição de certificados. + * As dependências externas foram minimizadas; os únicos requisitos são um kernel moderno e montagens de cgroups. 
+ * Inclui as dependências necessárias para facilitar a criação de clusters no estilo "baterias incluídas": + * containerd / cri-dockerd (runtime de contêineres - CRI) + * Flannel (interface de rede de contêineres - CNI) + * CoreDNS (DNS do cluster) + * Traefik (controlador de Ingress) + * ServiceLB (controlador de balanceamento de carga) + * Kube-router (controlador de políticas de rede) + * Local-path-provisioner (controlador de volumes persistentes) + * Spegel (espelho de registro de imagens de contêiner distribuído) + * Utilitários de host (iptables, socat, etc.) + +# Qual é o significado do nome? + +Queríamos uma instalação do Kubernetes que fosse metade do tamanho em termos de consumo de memória. Kubernetes é uma palavra de 10 letras estilizada como K8s. Então, algo metade do tamanho do Kubernetes seria uma palavra de 5 letras estilizada como K3s. Não existe uma forma longa de K3s e nenhuma pronúncia oficial. \ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/known-issues.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/known-issues.md new file mode 100644 index 000000000..d825544fe --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/known-issues.md @@ -0,0 +1,98 @@ +--- +title: Problemas Conhecidos +--- +Os problemas conhecidos são atualizados periodicamente e projetados para informá-lo sobre quaisquer problemas que podem não ser resolvidos imediatamente na próxima versão. + +### Snap Docker + +Se você planeja usar o K3s com o Docker, não é recomendado instalar o Docker por meio de um pacote snap, pois ele pode causar problemas na execução do K3s. + +### Iptables + +Se você estiver executando o iptables v1.6.1 e versões mais antigas no modo nftables, poderá encontrar problemas. Recomendamos utilizar iptables mais recentes (como 1.6.1+) para evitar problemas ou executar o modo legado do iptables. + +``` +update-alternatives --set iptables /usr/sbin/iptables-legacy +update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy +``` + +As versões 1.8.0-1.8.4 do Iptables têm problemas conhecidos que podem causar falhas no K3s. Várias distribuições Linux populares são fornecidas com essas versões por padrão. Um bug causa o acúmulo de regras duplicadas, o que afeta negativamente o desempenho e a estabilidade do nó. Consulte [Problema nº 3117](https://github.com/k3s-io/k3s/issues/3117) para obter informações sobre como determinar se você é afetado por esse problema. + +O K3s inclui uma versão funcional do iptables (v1.8.8) que funciona corretamente. Você pode dizer ao K3s para usar sua versão empacotada do iptables iniciando o K3s com a opção `--prefer-bundled-bin` ou desinstalando os pacotes iptables/nftables do seu sistema operacional. + +:::info Nota de Versão + +O sinalizador `--prefer-bundled-bin` está disponível a partir das versões 2022-12 (v1.26.0+k3s1, v1.25.5+k3s1, v1.24.9+k3s1, v1.23.15+k3s1). + +::: + +### Modo Rootless + +Executar o K3s com o modo Rootless é experimental e tem vários [problemas conhecidos.](./advanced.md#known-issues-with-rootless-mode) + +### Atualizando Clusters Hardened de v1.24.x para v1.25.x {#hardened-125} + +O Kubernetes removeu o PodSecurityPolicy da v1.25 em favor do Pod Security Standards. Você pode ler mais sobre o PSS na [documentação upstream](https://kubernetes.io/docs/concepts/security/pod-security-standards/). Para o K3S, há algumas etapas manuais que devem ser seguidas se qualquer `PodSecurityPolicy` tiver sido configurado nos nós. + +1. 
Em todos os nós, atualize o valor `kube-apiserver-arg` para remover o `PodSecurityPolicy` admission-plugin. Adicione o seguinte valor arg em vez disso: `'admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml'`, mas NÃO reinicie ou atualize o K3S ainda. Abaixo está um exemplo de como um arquivo de configuração pode ficar após essa atualização para o nó a ser reforçado: +```yaml +protect-kernel-defaults: true +secrets-encryption: true +kube-apiserver-arg: + - 'admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml' + - 'audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' + - 'audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' + - 'audit-log-maxage=30' + - 'audit-log-maxbackup=10' + - 'audit-log-maxsize=100' +kube-controller-manager-arg: + - 'terminated-pod-gc-threshold=10' + - 'use-service-account-credentials=true' +kubelet-arg: + - 'streaming-connection-idle-timeout=5m' +``` +2. Crie o arquivo `/var/lib/rancher/k3s/server/psa.yaml` com o seguinte conteúdo. Você pode querer isentar mais namespaces também. O exemplo abaixo isenta `kube-system` (obrigatório), `cis-operator-system` (opcional, mas útil para executar varreduras de segurança por meio do Rancher) e `system-upgrade` (obrigatório se estiver fazendo [Atualizações automatizadas](./upgrades/automated.md)). +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1beta1 + kind: PodSecurityConfiguration + defaults: + enforce: "restricted" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + namespaces: [kube-system, cis-operator-system, system-upgrade] +``` +3. Execute a atualização normalmente. Se estiver fazendo [Atualizações Automatizadas](./upgrades/automated.md), certifique-se de que o namespace onde o pod `system-upgrade-controller` está sendo executado esteja configurado para ser privilegiado de acordo com os [Níveis de Segurança do Pod](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-levels): +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: system-upgrade + labels: + # Esse valor deve ser privilegiado para que o controlador seja executado com sucesso. + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: v1.25 + # Estamos configurando esses valores para o nosso nível _desejado_ de `enforce`, mas observe que os valores abaixo podem ser quaisquer das opções disponíveis. + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/audit-version: v1.25 + pod-security.kubernetes.io/warn: privileged + pod-security.kubernetes.io/warn-version: v1.25 +``` +4. Após a conclusão da atualização, remova quaisquer recursos PSP restantes do cluster. Em muitos casos, pode haver PodSecurityPolicies e recursos RBAC associados em arquivos personalizados usados ​​para proteção dentro de `/var/lib/rancher/k3s/server/manifests/`. Remova esses recursos e o k3s será atualizado automaticamente. Às vezes, devido ao tempo, alguns deles podem ser deixados no cluster, nesse caso você precisará excluí-los manualmente. 
Se o [Guia de Proteção](./security/hardening-guide.md) foi seguido anteriormente, você deve ser capaz de excluí-los por meio do seguinte:
+```sh
+# Obtenha os recursos associados aos PSPs
+$ kubectl get roles,clusterroles,rolebindings,clusterrolebindings -A | grep -i psp
+
+# Exclua esses recursos:
+$ kubectl delete clusterrole.rbac.authorization.k8s.io/psp:restricted-psp clusterrole.rbac.authorization.k8s.io/psp:svclb-psp clusterrole.rbac.authorization.k8s.io/psp:system-unrestricted-psp clusterrolebinding.rbac.authorization.k8s.io/default:restricted-psp clusterrolebinding.rbac.authorization.k8s.io/system-unrestricted-node-psp-rolebinding && kubectl delete -n kube-system rolebinding.rbac.authorization.k8s.io/svclb-psp-rolebinding rolebinding.rbac.authorization.k8s.io/system-unrestricted-svc-acct-psp-rolebinding
+```
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/basic-network-options.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/basic-network-options.md
new file mode 100644
index 000000000..b7b96280a
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/basic-network-options.md
@@ -0,0 +1,191 @@
+---
+title: "Opções Básicas de Rede"
+---
+
+Esta página descreve as opções de configuração de rede do K3s, incluindo configuração ou substituição do Flannel e configuração de IPv6 ou dual-stack.
+
+## Opções de Flannel
+
+[Flannel](https://github.com/flannel-io/flannel/blob/master/README.md) é um provedor leve de malha de rede de camada 3 que implementa a Kubernetes Container Network Interface (CNI). É o que é comumente chamado de plugin CNI.
+
+* As opções do Flannel só podem ser definidas em nós de servidor e devem ser idênticas em todos os servidores do cluster.
+* O backend padrão do Flannel é `vxlan`. Para habilitar a criptografia, use o backend `wireguard-native`.
+* Usar `vxlan` no Raspberry Pi com versões recentes do Ubuntu requer [preparação adicional](../installation/requirements.md?os=pi#operating-systems).
+* Usar `wireguard-native` como backend do Flannel pode exigir módulos adicionais em algumas distribuições Linux. Consulte o [Guia de instalação do WireGuard](https://www.wireguard.com/install/) para obter detalhes.
+  As etapas de instalação do WireGuard garantirão que os módulos do kernel apropriados sejam instalados para seu sistema operacional.
+  Você deve garantir que os módulos do kernel do WireGuard estejam disponíveis em todos os nós, tanto servidores quanto agentes, antes de tentar usar o backend WireGuard do Flannel.
+
+
+| CLI Flag e Valor                     | Descrição                                                                                                                                                                                 |
+| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `--flannel-ipv6-masq`                | Aplique regras de mascaramento ao tráfego IPv6 (padrão para IPv4). Aplica-se somente em clusters dual-stack ou somente IPv6. Compatível com qualquer backend Flannel diferente de `none`. |
+| `--flannel-external-ip`              | Use endereços IP externos do nó como o destino para o tráfego Flannel, em vez de IPs internos. Aplica-se somente quando --node-external-ip é definido em um nó.                           |
+| `--flannel-backend=vxlan`            | Use VXLAN para encapsular os pacotes. Pode exigir módulos de kernel adicionais no Raspberry Pi.                                                                                           |
+| `--flannel-backend=host-gw`          | Use rotas IP para as sub-redes dos pods por meio de IPs de nó. Requer conectividade direta de camada 2 entre todos os nós no cluster. 
| +| `--flannel-backend=wireguard-native` | Use WireGuard para encapsular e criptografar tráfego de rede. Pode exigir módulos de kernel adicionais. | +| `--flannel-backend=ipsec` | Use strongSwan IPSec por meio do binário `swanctl` para criptografar o tráfego de rede. (Obsoleto; será removido na v1.27.0) | +| `--flannel-backend=none` | Desabilite o Flannel completamente. | + +:::info Nota de Versão + +O K3s não inclui mais os binários strongSwan `swanctl` e `charon` a partir das versões 2022-12 (v1.26.0+k3s1, v1.25.5+k3s1, v1.24.9+k3s1, v1.23.15+k3s1). Instale os pacotes corretos no seu nó antes de atualizar ou instalar essas versões se quiser usar o backend `ipsec`. + +::: + +### Migrando de `wireguard` ou `ipsec` para `wireguard-native` + +O backend `wireguard` legado requer a instalação da ferramenta `wg` no host. Este backend não está disponível no K3s v1.26 e superior, em favor do backend `wireguard-native`, que faz interface direta com o kernel. + +O backend legado `ipsec` requer a instalação dos binários `swanctl` e `charon` no host. Este backend não está disponível no K3s v1.27 e superior, em favor do backend `wireguard-native`. + +Recomendamos que os usuários migrem para o novo backend o mais rápido possível. A migração requer um curto período de inatividade enquanto os nós surgem com a nova configuração. Você deve seguir estas duas etapas: + +1. Atualize a configuração do K3s em todos os nós do servidor. Se estiver usando arquivos de configuração, o `/etc/rancher/k3s/config.yaml` deve incluir `flannel-backend: wireguard-native` em vez de `flannel-backend: wireguard` ou `flannel-backend: ipsec`. Se estiver configurando o K3s por meio de sinalizadores CLI na unidade systemd, os sinalizadores equivalentes devem ser alterados. +2. Reinicie todos os nós, começando pelos servidores. + +## CNI Customizado + +Inicie o K3s com `--flannel-backend=none` e instale o CNI de sua escolha. A maioria dos plugins CNI vem com seu próprio mecanismo de política de rede, então é recomendado definir `--disable-network-policy` também para evitar conflitos. Algumas informações importantes a serem consideradas: + + + + +Visite o site [Canal Docs](https://docs.tigera.io/calico/latest/getting-started/kubernetes/flannel/install-for-flannel#installing-calico-for-policy-and-flannel-aka-canal-for-networking). Siga as etapas para instalar o Canal. Modifique o YAML do Canal para que o encaminhamento de IP seja permitido na seção `container_settings`, por exemplo: + +```yaml +"container_settings": { + "allow_ip_forwarding": true +} +``` + +Aplique o Canal YAML. + +Garanta que as configurações foram aplicadas executando o seguinte comando no host: + +```bash +cat /etc/cni/net.d/10-canal.conflist +``` + +Você deverá ver que o encaminhamento de IP está definido como verdadeiro. + + + + +Siga o [Guia de plugins Calico CNI](https://docs.tigera.io/calico/latest/reference/configure-cni-plugins). Modifique o Calico YAML para que o encaminhamento de IP seja permitido na seção `container_settings`, por exemplo: + +```yaml +"container_settings": { + "allow_ip_forwarding": true +} +``` + +Aplique o Calico YAML. + +Garanta que as configurações foram aplicadas executando o seguinte comando no host: + +```bash +cat /etc/cni/net.d/10-calico.conflist +``` + +Você deverá ver que o encaminhamento de IP está definido como verdadeiro. + + + + + +Antes de executar `k3s-killall.sh` ou `k3s-uninstall.sh`, você deve remover manualmente as interfaces `cilium_host`, `cilium_net` e `cilium_vxlan`. 
Se você não fizer isso, poderá perder a conectividade de rede com o host quando o K3s for interrompido + +```bash +ip link delete cilium_host +ip link delete cilium_net +ip link delete cilium_vxlan +``` + +Além disso, as regras do iptables para cilium devem ser removidas: + +```bash +iptables-save | grep -iv cilium | iptables-restore +ip6tables-save | grep -iv cilium | ip6tables-restore +``` + + + + +## Configuração Control-Plane Egress Selector + +Os agentes e servidores K3s mantêm túneis websocket entre nós que são usados ​​para encapsular a comunicação bidirecional entre os componentes do plano de controle (apiserver) e do agente (kubelet e containerd). +Isso permite que os agentes operem sem expor as portas de streaming do tempo de execução do kubelet e do contêiner a conexões de entrada, e que o plano de controle se conecte aos serviços de cluster ao operar com o agente desabilitado. +Essa funcionalidade é equivalente ao serviço [Konnectivity](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/) comumente usado em outras distribuições do Kubernetes, e é gerenciado por meio da configuração do seletor de saída do apiserver. + +O modo padrão é `agent`. Os modos `pod` ou `cluster` são recomendados ao executar [servidores sem agente](../advanced.md#running-agentless-servers-experimental), para fornecer ao apiserver acesso aos pontos de extremidade do serviço de cluster na ausência de flannel e kube-proxy. + +O modo seletor de saída pode ser configurado em servidores por meio do sinalizador `--egress-selector-mode` e oferece quatro modos: +* `disabled`: o apiserver não usa túneis de agente para se comunicar com kubelets ou endpoints de cluster. +Este modo requer que os servidores executem o kubelet, CNI e kube-proxy e tenham conectividade direta com agentes, ou o apiserver não poderá acessar endpoints de serviço ou executar `kubectl exec` e `kubectl logs`. +* `agent` (padrão): o apiserver usa túneis de agente para se comunicar com kubelets. +Este modo requer que os servidores também executem o kubelet, CNI e kube-proxy, ou o apiserver não poderá acessar endpoints de serviço. +* `pod`: o apiserver usa túneis de agente para se comunicar com kubelets e endpoints de serviço, roteando conexões de endpoint para o agente correto observando Nodes e Endpoints. +**NOTA**: Este modo não funcionará ao usar um CNI que usa seu próprio IPAM e não respeita a alocação de PodCIDR do nó. O modo `cluster` ou `agent` deve ser usado com esses CNIs. +* `cluster`: O apiserver usa túneis de agente para se comunicar com kubelets e endpoints de serviço, roteando conexões de endpoint para o agente correto observando Pods e Endpoints. Este modo tem a maior portabilidade entre diferentes configurações de cluster, ao custo de maior sobrecarga. + +## Rede Dual-stack (IPv4 + IPv6) + +:::info Nota de Versão + +O suporte experimental está disponível a partir de [v1.21.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.21.0%2Bk3s1). +O suporte estável está disponível a partir de [v1.23.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.23.7%2Bk3s1). + +::: + +:::warning Problema Conhecido + +Antes da versão 1.27, o Kubernetes [Problema nº 111695](https://github.com/kubernetes/kubernetes/issues/111695) faz com que o Kubelet ignore os endereços IPv6 do nó se você tiver um ambiente dual-stack e não estiver usando a interface de rede primária para tráfego de cluster. 
Para evitar esse bug, use a versão 1.27 ou mais recente ou adicione o seguinte sinalizador aos servidores e agentes do K3s: + +``` +--kubelet-arg="node-ip=0.0.0.0" # Para priorizar o tráfego IPv4 +#OR +--kubelet-arg="node-ip=::" # Para priorizar o tráfego IPv6 +``` + +::: + +A rede dual-stack deve ser configurada quando o cluster é criado pela primeira vez. Ela não pode ser habilitada em um cluster existente depois que ele foi iniciado como IPv4-only. + +Para habilitar dual-stack no K3s, você deve fornecer dual-stack válido `cluster-cidr` e `service-cidr` em todos os nós do servidor. Este é um exemplo de uma configuração válida: + +``` +--cluster-cidr=10.42.0.0/16,2001:cafe:42::/56 --service-cidr=10.43.0.0/16,2001:cafe:43::/112 +``` + +Observe que você pode configurar quaisquer valores válidos de `cluster-cidr` e `service-cidr`, mas as máscaras acima são recomendadas. Se você alterar a máscara `cluster-cidr`, também deverá alterar os valores `node-cidr-mask-size-ipv4` e `node-cidr-mask-size-ipv6` para corresponder aos pods planejados por nó e à contagem total de nós. A maior máscara `service-cidr` suportada é /12 para IPv4 e /112 para IPv6. Lembre-se de permitir tráfego IPv6 se estiver implantando em uma nuvem pública. + +Ao usar endereços IPv6 que não são roteados publicamente, por exemplo, no intervalo ULA, você pode adicionar a opção `--flannel-ipv6-masq` para habilitar o NAT IPv6, pois por padrão os pods usam seus endereços IPv6 para tráfego de saída. + +Se você estiver usando um plugin CNI personalizado, ou seja, um plugin CNI diferente do Flannel, a configuração adicional pode ser necessária. Consulte a documentação dual-stack do seu plugin e verifique se as políticas de rede podem ser habilitadas. + +:::warning Problema Conhecido +Ao definir cluster-cidr e service-cidr com IPv6 como a família primária, o node-ip de todos os membros do cluster deve ser explicitamente definido, colocando o endereço IPv6 desejado do node como o primeiro endereço. Por padrão, o kubelet sempre usa IPv4 como a família de endereços primária. +::: + +## Rede Single-stack IPv6 + +:::info Nota de Versão +Disponível a partir de [v1.22.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.22.9%2Bk3s1) +::: + +:::warning Problema Conhecido +Se sua rota padrão IPv6 for definida por um anúncio de roteador (RA), você precisará definir o sysctl `net.ipv6.conf.all.accept_ra=2`; caso contrário, o nó descartará a rota padrão quando ela expirar. Esteja ciente de que aceitar RAs pode aumentar o risco de [ataques man-in-the-middle](https://github.com/kubernetes/kubernetes/issues/91507). +::: + +Clusters IPv6 single-stack (clusters sem IPv4) são suportados em K3s usando os flags `--cluster-cidr` e `--service-cidr`. Este é um exemplo de uma configuração válida: + +```bash +--cluster-cidr=2001:cafe:42::/56 --service-cidr=2001:cafe:43::/112 +``` + +Ao usar endereços IPv6 que não são roteados publicamente, por exemplo, no intervalo ULA, você pode adicionar a opção `--flannel-ipv6-masq` para habilitar o NAT IPv6, pois por padrão os pods usam seus endereços IPv6 para tráfego de saída. + +## Nós Sem um Nome de Host + +Alguns provedores de nuvem, como Linode, criarão máquinas com "localhost" como nome do host e outros podem não ter um nome de host definido. Isso pode causar problemas com a resolução do nome de domínio. Você pode executar o K3s com o sinalizador `--node-name` ou a variável de ambiente `K3S_NODE_NAME` e isso passará o nome do nó para resolver esse problema. 
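+
+Como ilustração, um esboço mínimo usando o script de instalação (o nome `k3s-node-1` é hipotético; use um hostname válido e exclusivo para cada nó):
+
+```bash
+# Esboço: define um nome de nó explícito por meio da variável de ambiente.
+curl -sfL https://get.k3s.io | K3S_NODE_NAME="k3s-node-1" sh -
+
+# Alternativa equivalente usando o sinalizador CLI:
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-name k3s-node-1" sh -
+```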
+
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/distributed-multicloud.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/distributed-multicloud.md
new file mode 100644
index 000000000..399cd6d5b
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/distributed-multicloud.md
@@ -0,0 +1,111 @@
+---
+title: "Cluster Híbrido ou MultiCloud Distribuído"
+---
+
+Um cluster K3s ainda pode ser implantado em nós que não compartilham uma rede privada comum e não estão conectados diretamente (por exemplo, nós em diferentes nuvens públicas). Há duas opções para conseguir isso: a solução multicloud K3s incorporada e a integração com o provedor de VPN `tailscale`.
+
+:::warning
+A latência entre nós aumentará à medida que a conectividade externa exigir mais saltos. Isso reduzirá o desempenho da rede e também poderá impactar a saúde do cluster se a latência for muito alta.
+:::
+
+:::warning
+O etcd incorporado não é suportado neste tipo de implantação. Se estiver usando o etcd incorporado, todos os nós do servidor devem ser acessíveis uns aos outros por meio de seus IPs privados. Os agentes podem ser distribuídos em várias redes, mas todos os servidores devem estar no mesmo local.
+:::
+
+### Solução Multicloud K3s Incorporada
+
+O K3s usa o wireguard para estabelecer uma malha VPN para tráfego de cluster. Cada nó deve ter um IP exclusivo por meio do qual pode ser alcançado (geralmente um IP público). O tráfego do supervisor do K3s usará um túnel websocket, e o tráfego do cluster (CNI) usará um túnel wireguard.
+
+Para habilitar esse tipo de implantação, você deve adicionar os seguintes parâmetros nos servidores:
+```bash
+--node-external-ip=<SERVER_EXTERNAL_IP> --flannel-backend=wireguard-native --flannel-external-ip
+```
+e nos agentes:
+```bash
+--node-external-ip=<AGENT_EXTERNAL_IP>
+```
+
+onde `SERVER_EXTERNAL_IP` é o IP através do qual podemos alcançar o nó do servidor e `AGENT_EXTERNAL_IP` é o IP através do qual podemos alcançar o nó do agente. Note que o parâmetro de configuração `K3S_URL` no agente deve usar o `SERVER_EXTERNAL_IP` para poder se conectar a ele. Lembre-se de verificar os [Requisitos de Rede](../installation/requirements.md#networking) e permitir acesso às portas listadas em endereços internos e externos.
+
+Tanto `SERVER_EXTERNAL_IP` quanto `AGENT_EXTERNAL_IP` devem ter conectividade entre si e normalmente são IPs públicos.
+
+:::info IPs Dinâmicos
+Se os nós receberem IPs dinâmicos e o IP mudar (por exemplo, na AWS), você deve modificar o parâmetro `--node-external-ip` para refletir o novo IP. Se estiver executando o K3s como um serviço, você deve modificar `/etc/systemd/system/k3s.service` e então executar:
+
+```bash
+systemctl daemon-reload
+systemctl restart k3s
+```
+:::
+
+### Integração com o provedor Tailscale VPN (experimental)
+
+Disponível nas versões v1.27.3, v1.26.6, v1.25.11 e mais recentes.
+
+O K3s pode ser integrado ao [Tailscale](https://tailscale.com/) para que os nós usem o serviço Tailscale VPN para construir uma malha entre os nós.
+
+Há quatro etapas a serem executadas com o Tailscale antes de implantar o K3s:
+
+1. Entre na sua conta Tailscale
+
+2. Em `Configurações > Chaves`, gere uma chave de autenticação ($AUTH-KEY), que pode ser reutilizável para todos os nós do seu cluster
+
+3. Decida o podCIDR que o cluster usará (por padrão `10.42.0.0/16`). 
Anexe o CIDR (ou CIDRs para dual-stack) em Controles de acesso com a stanza:
+```yaml
+"autoApprovers": {
+    "routes": {
+      "10.42.0.0/16":      ["your_account@xyz.com"],
+      "2001:cafe:42::/56": ["your_account@xyz.com"],
+    },
+  },
+```
+
+4. Instale o Tailscale em seus nós:
+```bash
+curl -fsSL https://tailscale.com/install.sh | sh
+```
+
+Para implantar o K3s com a integração do Tailscale habilitada, você deve adicionar o seguinte parâmetro em cada um dos seus nós:
+```bash
+--vpn-auth="name=tailscale,joinKey=$AUTH-KEY"
+```
+ou forneça essas informações em um arquivo e use o parâmetro:
+```bash
+--vpn-auth-file=$PATH_TO_FILE
+```
+
+Opcionalmente, se você tiver seu próprio servidor Tailscale (por exemplo, headscale), você pode se conectar a ele anexando `,controlServerURL=$URL` aos parâmetros vpn-auth.
+
+Em seguida, você pode prosseguir para criar o servidor usando o seguinte comando:
+
+```bash
+k3s server --token <token> --vpn-auth="name=tailscale,joinKey=<joinKey>" --node-external-ip=<ip-tailscale-do-servidor>
+```
+
+Depois de executar este comando, acesse o console de administração do Tailscale para aprovar o nó e a sub-rede do Tailscale (se ainda não tiverem sido aprovados pelo autoApprovers).
+
+Depois que o servidor estiver configurado, conecte os agentes usando:
+
+```bash
+k3s agent --token <token> --vpn-auth="name=tailscale,joinKey=<joinKey>" --server https://<ip-tailscale-do-servidor>:6443 --node-external-ip=<ip-tailscale-do-agente>
+```
+
+Novamente, aprove o nó e a sub-rede do Tailscale como você fez para o servidor.
+
+Se você tiver ACLs ativadas no Tailscale, precisará adicionar uma regra "accept" para permitir que os pods se comuniquem entre si. Supondo que a chave de autenticação que você criou marque automaticamente os nós do Tailscale com a tag `testing-k3s`, a regra deve ficar assim:
+
+```yaml
+"acls": [
+    {
+      "action": "accept",
+      "src":    ["tag:testing-k3s", "10.42.0.0/16"],
+      "dst":    ["tag:testing-k3s:*", "10.42.0.0/16:*"],
+    },
+],
+```
+
+:::warning
+
+Se você planeja executar vários clusters K3s usando a mesma rede tailscale, crie [ACLs](https://tailscale.com/kb/1018/acls) apropriadas para evitar conflitos de IP ou use sub-redes podCIDR diferentes para cada cluster.

+:::
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/multus-ipams.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/multus-ipams.md
new file mode 100644
index 000000000..e881a5754
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/multus-ipams.md
@@ -0,0 +1,165 @@
+---
+title: "Plugins Multus e IPAM"
+---
+
+[Multus CNI](https://github.com/k8snetworkplumbingwg/multus-cni) é um plugin CNI que permite anexar múltiplas interfaces de rede a pods. O Multus não substitui plugins CNI; em vez disso, ele atua como um multiplexador de plugins CNI. O Multus é útil em certos casos de uso, especialmente quando os pods são intensivos em rede e exigem interfaces de rede extras que suportam técnicas de aceleração de plano de dados, como SR-IOV.
+
+Para obter mais informações sobre o Multus, consulte a documentação [multus-cni](https://github.com/k8snetworkplumbingwg/multus-cni/tree/master/docs).
+
+O Multus não pode ser implantado de forma independente. Ele sempre requer pelo menos um plugin CNI convencional que atenda aos requisitos de rede do cluster Kubernetes. Esse plugin CNI se torna o padrão para o Multus e será usado para fornecer a interface primária para todos os pods. Ao implantar K3s com opções padrão, esse plugin CNI é o Flannel. 
+ +:::info Nota de Versão +O K3s usa um caminho binário CNI fixo a partir das versões de outubro de 2024: v1.28.15+k3s1, v1.29.10+k3s1, v1.30.6+k3s1, v1.31.2+k3s1. +::: + +O K3s olha para `$DATA_DIR/data/cni` para binários de plug-ins CNI. Por padrão, é `/var/lib/rancher/k3s/data/cni`. Plug-ins CNI adicionais devem ser instalados neste local. + +Antes dos lançamentos de outubro de 2024, os binários CNI faziam parte do pacote de espaço do usuário do K3s em `$DATA_DIR/data/$HASH/bin`, onde o hash é exclusivo para cada lançamento do K3s. +Isso dificultava a implantação de plug-ins CNI adicionais, pois o caminho mudava toda vez que o K3s era atualizado. +Se estiver implantando o Multus em uma versão mais antiga do K3s, você deve usar `/var/lib/rancher/k3s/data/current/bin/` como o diretório bin do CNI, mas espere que os plug-ins precisem ser reimplantados sempre que o K3s for atualizado. + +### Implantar com um Plugin IPAM + +Um plugin IP Address Manager (IPAM) é necessário para atribuir endereços IP nas interfaces extras criadas pelo Multus. Um ou mais IPAMs podem ser instalados; os exemplos abaixo mostram o uso de um único plugin IPAM, mas eles podem ser combinados conforme necessário. + +Os exemplos de implantação do helm abaixo implantarão um DaemonSet para criar pods Multus para instalar os binários CNI necessários em `/var/lib/rancher/k3s/data/cni/` e a configuração Multus CNI em `/var/lib/rancher/k3s/agent/etc/cni/net.d`. + + + +O plugin IPAM host-local aloca endereços IP de um conjunto de intervalos de endereços. Ele armazena o estado localmente no sistema de arquivos do host, garantindo assim a exclusividade dos endereços IP em um único host. Portanto, não o recomendamos para clusters de vários nós. Este plugin IPAM não requer nenhuma implantação extra. Para mais informações: https://www.cni.dev/plugins/current/ipam/host-local/. + +Para usar o plugin host-local, implante o Multus com a seguinte configuração: +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: multus + namespace: kube-system +spec: + repo: https://rke2-charts.rancher.io + chart: rke2-multus + targetNamespace: kube-system + valuesContent: |- + config: + fullnameOverride: multus + cni_conf: + confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + binDir: /var/lib/rancher/k3s/data/cni/ + kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig +``` + + + +[Whereabouts](https://github.com/k8snetworkplumbingwg/whereabouts) é um plugin CNI de Gerenciamento de Endereços IP (IPAM) que atribui endereços IP em todo o cluster. + +Para usar o plugin Whereabouts IPAM, implante o Multus com a seguinte configuração: +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: multus + namespace: kube-system +spec: + repo: https://rke2-charts.rancher.io + chart: rke2-multus + targetNamespace: kube-system + valuesContent: |- + config: + fullnameOverride: multus + cni_conf: + confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + binDir: /var/lib/rancher/k3s/data/cni/ + kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig + rke2-whereabouts: + fullnameOverride: whereabouts + enabled: true + cniConf: + confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + binDir: /var/lib/rancher/k3s/data/cni/ +``` + +Ao usar whereabouts no K3s, `configuration_path` deve ser definido como `/var/lib/rancher/k3s/agent/etc/cni/net.d/whereabouts.d/whereabouts.conf` na configuração `ipam` do NetworkAttachmentDefinition. 
+Por exemplo, ao usar whereabouts como o IPAM com o plugin macvlan: +```yaml +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: macvlan-whereabouts +spec: + config: |- + { + "cniVersion": "1.0.0", + "type": "macvlan", + "master": "eth0", + "mode": "bridge", + "ipam": { + "type": "whereabouts", + "range": "172.17.0.0/24", + "gateway": "172.17.0.1", + "configuration_path": "/var/lib/rancher/k3s/agent/etc/cni/net.d/whereabouts.d/whereabouts.conf" + } + } +``` + + + +O plugin dhcp IPAM pode ser implantado quando já houver um servidor DHCP em execução na rede. Este daemonset cuida da renovação periódica do lease do DHCP. Para mais informações, verifique a documentação oficial do [plugin DHCP IPAM](https://www.cni.dev/plugins/current/ipam/dhcp/). + +Para usar o plugin DHCP, implante o Multus com a seguinte configuração: +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: multus + namespace: kube-system +spec: + repo: https://rke2-charts.rancher.io + chart: rke2-multus + targetNamespace: kube-system + valuesContent: |- + config: + fullnameOverride: multus + cni_conf: + confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + binDir: /var/lib/rancher/k3s/data/cni/ + kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig + manifests: + dhcpDaemonSet: true +``` + + + + +### Usando Multus + +Depois que o Multus for implantado, você pode criar recursos NetworkAttachmentDefinition e referenciá-los nas especificações do Pod para anexar interfaces adicionais. +Por exemplo, usando o exemplo whereabouts acima, você pode criar uma interface `eth1` em um Pod usando a anotação `k8s.v1.cni.cncf.io/networks`: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: multus-demo + labels: + app: multus-demo +spec: + replicas: 1 + selector: + matchLabels: + app: multus-demo + template: + metadata: + annotations: + k8s.v1.cni.cncf.io/networks: macvlan-whereabouts@eth1 + labels: + app: multus-demo + spec: + containers: + - name: shell + image: docker.io/rancher/mirrored-library-busybox:1.36.1 + imagePullPolicy: IfNotPresent + command: + - sleep + - "3600" +``` + +Consulte a documentação upstream para obter informações adicionais e exemplos. \ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking-services.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking-services.md new file mode 100644 index 000000000..ca49dcd9e --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking-services.md @@ -0,0 +1,108 @@ +--- +title: "Serviços de Rede" +--- + +Esta página explica como o CoreDNS, o controlador Traefik Ingress, o controlador de política de rede e o controlador do balanceador de carga ServiceLB funcionam no K3s. + +Consulte a página [Opções de rede de instalação](./basic-network-options.md) para obter detalhes sobre as opções de configuração do Flannel e seleção de backend, ou como configurar seu próprio CNI. + +Para obter informações sobre quais portas precisam ser abertas para K3s, consulte os [Requisitos de rede](../installation/requirements.md#networking). + +## CoreDNS + +O CoreDNS é implantado automaticamente na inicialização do servidor. Para desabilitá-lo, configure todos os servidores no cluster com a opção `--disable=coredns`. + +Se você não instalar o CoreDNS, precisará instalar um provedor de DNS de cluster. 
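+
+Como ilustração, um esboço mínimo para desabilitar o CoreDNS via script de instalação e verificar os pods correspondentes (assumindo o rótulo padrão `k8s-app=kube-dns` usado pela implantação do CoreDNS):
+
+```bash
+# Esboço: instala um servidor K3s com o CoreDNS desabilitado.
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable=coredns" sh -
+
+# Verifica se há pods de DNS do cluster em execução:
+kubectl -n kube-system get pods -l k8s-app=kube-dns
+```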
+ +## Traefik Ingress Controller + +[Traefik](https://traefik.io/) é um proxy reverso HTTP moderno e balanceador de carga feito para implementar microsserviços com facilidade. Ele simplifica a complexidade da rede ao projetar, implementar e executar aplicativos. + +O controlador de entrada do Traefik implanta um serviço LoadBalancer que usa as portas 80 e 443 e anuncia os IPs externos do serviço LoadBalancer no status dos recursos de entrada que ele gerencia. + +Por padrão, o ServiceLB usará todos os nós no cluster para hospedar o Traefik LoadBalancer Service, o que significa que as portas 80 e 443 não poderão ser usadas por outros pods HostPort ou NodePort, e o status dos recursos do Ingress mostrará todos os IPs de nós dos membros do cluster. + +Para restringir os nós usados ​​pelo Traefik e, por extensão, os IPs dos nós anunciados no status de entrada, você pode seguir as instruções na seção [Controlando a seleção de nós do ServiceLB](#controlling-servicelb-node-selection) abaixo para limitar em quais nós o ServiceLB é executado ou adicionar alguns nós a um pool do LoadBalancer e restringir o serviço Traefik a esse pool definindo rótulos correspondentes no Traefik HelmChartConfig. + +O Traefik é implantado por padrão ao iniciar o servidor. Para mais informações, consulte [Gerenciando Componentes Empacotados](../installation/packaged-components.md). O arquivo de configuração padrão é encontrado em `/var/lib/rancher/k3s/server/manifests/traefik.yaml`. + +O arquivo `traefik.yaml` não deve ser editado manualmente, pois o K3s substituirá o arquivo pelos padrões na inicialização. Em vez disso, você deve personalizar o Traefik criando um manifesto `HelmChartConfig` adicional em `/var/lib/rancher/k3s/server/manifests`. Para mais detalhes e um exemplo, consulte [Personalizando componentes empacotados com HelmChartConfig](../helm.md#customizing-packaged-components-with-helmchartconfig). Para mais informações sobre os possíveis valores de configuração, consulte os [Parâmetros de configuração do Traefik Helm.](https://github.com/traefik/traefik-helm-chart/tree/master/traefik). + +Para remover o Traefik do seu cluster, inicie todos os servidores com o sinalizador `--disable=traefik`. + +O K3s inclui o Traefik v2. As versões 1.21 a 1.30 do K3s instalam o Traefik v2, a menos que uma instalação existente do Traefik v1 seja encontrada, caso em que o Traefik não é atualizado para a v2. As versões 1.20 e anteriores do K3s incluem o Traefik v1. Para obter mais informações sobre a versão específica do Traefik incluída no K3s, consulte as Notas de versão da sua versão. + +Para migrar de uma instância mais antiga do Traefik v1, consulte a [documentação do Traefik](https://doc.traefik.io/traefik/migration/v1-to-v2/) e a [ferramenta de migração](https://github.com/traefik/traefik-migration-tool). + +## Controlador de Política de Rede + +O K3s inclui um controlador de política de rede incorporado. A implementação subjacente é a biblioteca do controlador netpol do [kube-router](https://github.com/cloudnativelabs/kube-router) (nenhuma outra funcionalidade do kube-router está presente) e pode ser encontrada [aqui](https://github.com/k3s-io/k3s/tree/master/pkg/agent/netpol). + +Para desativá-lo, inicie cada servidor com o sinalizador `--disable-network-policy`. + +:::note +As regras de iptables de política de rede não são removidas se a configuração do K3s for alterada para desabilitar o controlador de política de rede. 
Para limpar as regras de política de rede configuradas do kube-router após desabilitar o controlador de política de rede, use o script `k3s-killall.sh` ou limpe-as usando `iptables-save` e `iptables-restore`. Essas etapas devem ser executadas manualmente em todos os nós do cluster. +``` +iptables-save | grep -v KUBE-ROUTER | iptables-restore +ip6tables-save | grep -v KUBE-ROUTER | ip6tables-restore +``` +::: + +## Serviço de Balanceador de Carga + +Qualquer controlador LoadBalancer pode ser implantado no seu cluster K3s. Por padrão, o K3s fornece um balanceador de carga conhecido como [ServiceLB](https://github.com/k3s-io/klipper-lb) (anteriormente Klipper LoadBalancer) que usa portas de host disponíveis. + +O Kubernetes Upstream permite que Serviços do tipo LoadBalancer sejam criados, mas não inclui uma implementação de balanceador de carga padrão, então esses serviços permanecerão `pendentes` até que um seja instalado. Muitos serviços hospedados exigem um provedor de nuvem, como Amazon EC2 ou Microsoft Azure, para oferecer uma implementação de balanceador de carga externo. Por outro lado, o K3s ServiceLB torna possível usar Serviços LoadBalancer sem um provedor de nuvem ou qualquer configuração adicional. + +### Como Funciona o ServiceLB + +O controlador ServiceLB monitora os [Serviços](https://kubernetes.io/docs/concepts/services-networking/service/) do Kubernetes com o campo `spec.type` definido como `LoadBalancer`. + +Para cada LoadBalancer Service, um [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) é criado no namespace `kube-system`. Este DaemonSet, por sua vez, cria ServiceLB Pods com um prefixo `svc-`, em cada nó. Esses pods aproveitam o hostPort usando a porta de serviço, portanto, eles só serão implantados em nós que tenham essa porta disponível. Se não houver nenhum nó com essa porta disponível, o LB permanecerá Pendente. Observe que é possível expor vários Serviços no mesmo nó, desde que usem portas diferentes. + +Quando o ServiceLB Pod é executado em um nó que tem um IP externo configurado, o IP externo do nó é preenchido na lista de endereços `status.loadBalancer.ingress` do Service com `ipMode: VIP`. Caso contrário, o IP interno do nó é usado. + +Se o tráfego para o IP externo estiver sujeito a [Network Address Translation (NAT)](https://en.wikipedia.org/wiki/Network_address_translation) - por exemplo, em nuvens públicas ao usar o IP público do nó como IP externo - o tráfego é roteado para o pod ServiceLB via hostPort. O pod então usa iptables para encaminhar o tráfego para o endereço ClusterIP e porta do Service. Se o tráfego não estiver sujeito a NAT e, em vez disso, chegar com endereço de destino correspondente ao endereço LoadBalancer, o tráfego é interceptado (normalmente por kube-proxy iptables chains ou ipvs) e encaminhado para o endereço ClusterIP e porta do Service. + +### Uso + +Crie um [Serviço do tipo LoadBalancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) no K3s. + +:::warning Nota de Versão +Se o tráfego externo atingir o nó usando um NAT (por exemplo, em nuvens públicas) e você precisar de `externalTrafficPolicy=local` para fins como preservação de IP de origem do cliente, não defina a configuração do k3s `node-external-ip` para nenhum dos nós, pois isso não funcionará corretamente. 
+
:::
+
+### Controlando a Seleção do Nó ServiceLB
+
+Adicionar o rótulo `svccontroller.k3s.cattle.io/enablelb=true` a um ou mais nós alterna o controlador ServiceLB para o modo de lista de permissões, onde apenas nós com o rótulo são elegíveis para hospedar pods do LoadBalancer. Os nós que permanecerem sem rótulo serão excluídos do uso pelo ServiceLB.
+
+:::note
+Por padrão, os nós não são rotulados. Enquanto todos os nós permanecerem sem rótulo, todos os nós com portas disponíveis serão usados pelo ServiceLB.
+:::
+
+### Criando Pools de Nós do ServiceLB
+Para selecionar um subconjunto específico de nós para hospedar pods para um LoadBalancer, adicione o rótulo `enablelb` aos nós desejados e defina valores de rótulo `lbpool` correspondentes nos nós e serviços. Por exemplo:
+
+1. Rotule o Nó A e o Nó B com `svccontroller.k3s.cattle.io/lbpool=pool1` e `svccontroller.k3s.cattle.io/enablelb=true`
+2. Rotule o Nó C e o Nó D com `svccontroller.k3s.cattle.io/lbpool=pool2` e `svccontroller.k3s.cattle.io/enablelb=true`
+3. Crie um Serviço LoadBalancer na porta 443 com o rótulo `svccontroller.k3s.cattle.io/lbpool=pool1`. O DaemonSet para este serviço implanta Pods apenas no Nó A e no Nó B.
+4. Crie outro Serviço LoadBalancer na porta 443 com o rótulo `svccontroller.k3s.cattle.io/lbpool=pool2`. O DaemonSet implantará Pods somente no Nó C e no Nó D.
+
+### Desabilitando o ServiceLB
+
+Para desabilitar o ServiceLB, configure todos os servidores no cluster com o sinalizador `--disable=servicelb`.
+
+Isso é necessário se você deseja executar um LB diferente, como o MetalLB.
+
+## Implantando um Gerenciador de Controle de Nuvem Externo
+
+Para reduzir o tamanho binário, o K3s remove todos os provedores de nuvem "na árvore" (integrados). Em vez disso, o K3s fornece um stub do Cloud Controller Manager (CCM) incorporado que faz o seguinte:
+- Define os campos de endereço InternalIP e ExternalIP do nó com base nos sinalizadores `--node-ip` e `--node-external-ip`.
+- Hospeda o controlador ServiceLB LoadBalancer.
+- Limpa a contaminação `node.cloudprovider.kubernetes.io/uninitialized` que está presente quando o provedor de nuvem é definido como `external`
+
+Antes de implantar um CCM externo, você deve iniciar todos os servidores K3s com o sinalizador `--disable-cloud-controller` para desabilitar o CCM incorporado.
+
+:::note
+Se você desabilitar o CCM integrado e não implantar e configurar corretamente um substituto externo, os nós permanecerão contaminados e não programáveis.
+:::
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking.md
new file mode 100644
index 000000000..f6ebcd45e
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking.md
@@ -0,0 +1,13 @@
+---
+title: "Rede"
+---
+
+Esta seção contém instruções para configurar a rede no K3s. 
+
+### Desabilitando o ServiceLB
+
+Para desabilitar o ServiceLB, configure todos os servidores do cluster com o sinalizador `--disable=servicelb`.
+
+Isso é necessário se você deseja executar um LB diferente, como o MetalLB.
+
+## Implantando um Gerenciador de Controle de Nuvem Externo
+
+Para reduzir o tamanho do binário, o K3s remove todos os provedores de nuvem "in-tree" (integrados). Em vez disso, o K3s fornece um stub incorporado do Cloud Controller Manager (CCM) que faz o seguinte:
+- Define os campos de endereço InternalIP e ExternalIP do nó com base nos sinalizadores `--node-ip` e `--node-external-ip`.
+- Hospeda o controlador de LoadBalancer ServiceLB.
+- Remove o taint `node.cloudprovider.kubernetes.io/uninitialized`, presente quando o provedor de nuvem é definido como `external`.
+
+Antes de implantar um CCM externo, você deve iniciar todos os servidores K3s com o sinalizador `--disable-cloud-controller` para desabilitar o CCM incorporado.
+
+:::note
+Se você desabilitar o CCM integrado e não implantar e configurar corretamente um substituto externo, os nós permanecerão com o taint e não serão programáveis.
+:::
\ No newline at end of file
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking.md
new file mode 100644
index 000000000..f6ebcd45e
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/networking/networking.md
@@ -0,0 +1,13 @@
+---
+title: "Rede"
+---
+
+Esta seção contém instruções para configurar a rede no K3s.
+
+[Opções Básicas de Rede](basic-network-options.md) abrange a configuração básica de rede do cluster, como o Flannel e as configurações de pilha simples/dupla.
+
+[Cluster híbrido/multicloud](distributed-multicloud.md) fornece orientação sobre as opções disponíveis para estender o cluster K3s por nós remotos ou híbridos.
+
+[Plugins Multus e IPAM](multus-ipams.md) fornece orientação para aproveitar o Multus no K3s e ter múltiplas interfaces por pod.
+
+[Serviços de rede: DNS, ingress etc.](networking-services.md) explica como o CoreDNS, o Traefik, o controlador de política de rede e o controlador ServiceLB funcionam dentro do K3s.
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/quick-start.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/quick-start.md
new file mode 100644
index 000000000..dfc7b9bc5
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/quick-start.md
@@ -0,0 +1,43 @@
+---
+title: "Guia Rápido"
+---
+
+Este guia ajudará você a iniciar rapidamente um cluster com as opções padrão. A [seção de instalação](./installation/installation.md) cobre com mais detalhes como o K3s pode ser configurado.
+
+Certifique-se de que seus nós atendem aos [requisitos](./installation/requirements.md) antes de continuar.
+
+Para informações sobre como os componentes do K3s trabalham juntos, consulte a [seção de arquitetura](./architecture.md).
+
+:::info
+Novo no Kubernetes? A documentação oficial do Kubernetes já possui ótimos tutoriais explicando os conceitos básicos [aqui](https://kubernetes.io/docs/tutorials/kubernetes-basics/).
+:::
+
+## Script de Instalação
+
+O K3s fornece um script de instalação que é uma maneira prática de instalá-lo como um serviço em sistemas baseados em systemd ou openrc. Este script está disponível em https://get.k3s.io. Para instalar o K3s usando este método, basta executar:
+
+```bash
+curl -sfL https://get.k3s.io | sh -
+```
+
+Após executar esta instalação:
+
+- O serviço K3s será configurado para reiniciar automaticamente após reinicializações do nó ou caso o processo falhe ou seja encerrado
+- Utilitários adicionais serão instalados, incluindo `kubectl`, `crictl`, `ctr`, `k3s-killall.sh` e `k3s-uninstall.sh`
+- Um arquivo [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) será gravado em `/etc/rancher/k3s/k3s.yaml`, e o kubectl instalado pelo K3s o utilizará automaticamente
+
+Uma instalação de servidor de nó único é um cluster Kubernetes totalmente funcional, incluindo todos os componentes de datastore, control-plane, kubelet e runtime de contêiner necessários para hospedar pods de carga de trabalho. Não é necessário adicionar nós de servidor ou de agente, mas você pode querer fazê-lo para adicionar capacidade ou redundância ao seu cluster.
+
+Para instalar nós de agente adicionais e adicioná-los ao cluster, execute o script de instalação com as variáveis de ambiente `K3S_URL` e `K3S_TOKEN`. Aqui está um exemplo mostrando como conectar um agente:
+
+```bash
+curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh -
+```
+
+Definir o parâmetro `K3S_URL` faz com que o instalador configure o K3s como um agente, em vez de um servidor. O agente do K3s se registrará no servidor K3s que escuta na URL fornecida. O valor a ser usado em `K3S_TOKEN` está armazenado em `/var/lib/rancher/k3s/server/node-token` no seu nó servidor.
+
+:::note
+Cada máquina deve ter um hostname exclusivo. Se suas máquinas não tiverem hostnames exclusivos, defina a variável de ambiente `K3S_NODE_NAME` e forneça um valor com um hostname válido e exclusivo para cada nó.
+:::
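+
+Por exemplo, um esboço ilustrativo (a URL do servidor, o token e o nome do nó são hipotéticos):
+
+```bash
+# Instala um agente definindo um nome de nó exclusivo via K3S_NODE_NAME
+curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken K3S_NODE_NAME=agente-01 sh -
+```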
+
+Se estiver interessado em ter mais nós de servidor, consulte as páginas [Alta Disponibilidade com etcd Embutido](./datastore/ha-embedded.md) e [Alta Disponibilidade com BD Externo](./datastore/ha.md) para mais informações.
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/env-variables.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/env-variables.md
new file mode 100644
index 000000000..39e789447
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/env-variables.md
@@ -0,0 +1,64 @@
+---
+title: Variáveis de Ambiente
+---
+
+Conforme mencionado no [Guia de Início Rápido](../quick-start.md), você pode usar o script de instalação disponível em https://get.k3s.io para instalar o K3s como um serviço em sistemas baseados em systemd e openrc.
+
+A forma mais simples deste comando é a seguinte:
+
+```bash
+curl -sfL https://get.k3s.io | sh -
+```
+
+Ao usar este método para instalar o K3s, as seguintes variáveis de ambiente podem ser usadas para configurar a instalação:
+
+| Variável de Ambiente | Descrição |
+| --- | --- |
+| `INSTALL_K3S_SKIP_DOWNLOAD` | Se definido como verdadeiro, não fará o download do hash nem do binário do K3s. |
+| `INSTALL_K3S_SYMLINK` | Por padrão, cria links simbólicos para os binários kubectl, crictl e ctr se os comandos ainda não existirem no caminho. Se definido como 'skip', os links não serão criados; se definido como 'force', links existentes serão sobrescritos. |
+| `INSTALL_K3S_SKIP_ENABLE` | Se definido como verdadeiro, não habilitará nem iniciará o serviço K3s. |
+| `INSTALL_K3S_SKIP_START` | Se definido como verdadeiro, não iniciará o serviço K3s. |
+| `INSTALL_K3S_VERSION` | Versão do K3s a ser baixada do GitHub. Se não for especificada, tentará baixar do canal estável. |
+| `INSTALL_K3S_BIN_DIR` | Diretório em que serão instalados o binário do K3s, os links e o script de desinstalação; o padrão é `/usr/local/bin`. |
+| `INSTALL_K3S_BIN_DIR_READ_ONLY` | Se definido como verdadeiro, não gravará arquivos em `INSTALL_K3S_BIN_DIR`; força `INSTALL_K3S_SKIP_DOWNLOAD=true`. |
+| `INSTALL_K3S_SYSTEMD_DIR` | Diretório em que serão instalados o serviço systemd e os arquivos de ambiente; o padrão é `/etc/systemd/system`. |
+| `INSTALL_K3S_EXEC` | Comando, com sinalizadores, usado para iniciar o K3s no serviço. Se o comando não for especificado e `K3S_URL` estiver definido, o padrão será "agent"; se `K3S_URL` não estiver definido, o padrão será "server". Para obter ajuda, consulte [este exemplo.](../installation/configuration.md#configuration-with-install-script) |
+| `INSTALL_K3S_NAME` | Nome do serviço systemd a ser criado; o padrão é 'k3s' ao executar o K3s como servidor e 'k3s-agent' ao executá-lo como agente. Se especificado, o nome será prefixado com 'k3s-'. |
+| `INSTALL_K3S_TYPE` | Tipo de serviço systemd a ser criado; se não for especificado, o padrão é derivado do comando exec do K3s. |
+| `INSTALL_K3S_SELINUX_WARN` | Se definido como verdadeiro, continuará mesmo se a política k3s-selinux não for encontrada. |
+| `INSTALL_K3S_SKIP_SELINUX_RPM` | Se definido como verdadeiro, ignorará a instalação automática do RPM do k3s. |
+| `INSTALL_K3S_CHANNEL_URL` | URL do canal para buscar a URL de download do K3s. O padrão é https://update.k3s.io/v1-release/channels. |
+| `INSTALL_K3S_CHANNEL` | Canal a ser usado para buscar a URL de download do K3s. O padrão é "stable". As opções incluem: `stable`, `latest`, `testing`. |
+
+Este exemplo mostra onde colocar as variáveis de ambiente mencionadas acima como opções (após o pipe):
+
+```bash
+curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh -
+```
+
+Variáveis de ambiente que começam com `K3S_` serão preservadas para uso pelos serviços systemd e openrc.
+
+Definir `K3S_URL` sem definir explicitamente um comando exec fará com que o comando seja "agent" por padrão.
+
+Ao executar o agente, `K3S_TOKEN` também deve ser definido.
+
+:::info Nota de Versão
+Disponível a partir das versões de outubro de 2024: v1.28.15+k3s1, v1.29.10+k3s1, v1.30.6+k3s1, v1.31.2+k3s1.
+:::
+
+O K3s agora usa o `PATH` para encontrar runtimes de contêiner alternativos, além de verificar os caminhos padrão usados pelos pacotes de runtime de contêiner. Para usar esse recurso, você deve modificar a variável de ambiente PATH do serviço K3s para adicionar os diretórios que contêm os binários do runtime de contêiner.
+
+É recomendado modificar um destes dois arquivos de ambiente:
+
+- /etc/default/k3s # ou k3s-agent
+- /etc/sysconfig/k3s # ou k3s-agent
+
+Este exemplo adiciona o `PATH` em `/etc/default/k3s`:
+
+```bash
+echo PATH=$PATH >> /etc/default/k3s
+```
+
+:::warning
+As alterações no `PATH` devem ser feitas com cuidado para evitar colocar binários não confiáveis no caminho de serviços executados como root.
+:::
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/flag-deprecation.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/flag-deprecation.md
new file mode 100644
index 000000000..b490886ed
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/flag-deprecation.md
@@ -0,0 +1,31 @@
+---
+title: "Descontinuação de Sinalizadores"
+---
+
+O K3s é um projeto de desenvolvimento rápido e, como tal, precisamos de uma maneira de descontinuar sinalizadores e opções de configuração. Esta página descreve o processo para descontinuar sinalizadores e opções de configuração. Para garantir que os usuários não sejam surpreendidos pela remoção de sinalizadores, o processo é semelhante à [Política de Descontinuação do Kubernetes](https://kubernetes.io/docs/reference/using-api/deprecation-policy/).
+
+## Processo
+
+1) Os sinalizadores podem ser declarados como "To Be Deprecated" a qualquer momento.
+2) Os sinalizadores "To Be Deprecated" devem ser rotulados como tal no próximo patch de todas as versões atualmente suportadas. Além disso, o sinalizador começará a avisar os usuários de que será descontinuado na próxima versão secundária.
+3) Na próxima versão secundária, o sinalizador será marcado como descontinuado na documentação e convertido em um sinalizador oculto no código. O sinalizador continuará a operar e a emitir avisos aos usuários.
+4) Na ramificação da versão secundária seguinte, os sinalizadores descontinuados se tornarão "não operacionais", causando um erro fatal se usados. Esse erro deve indicar ao usuário quaisquer novos sinalizadores ou configurações que substituam o sinalizador antigo.
+5) Na versão secundária subsequente, os sinalizadores não operacionais serão removidos da documentação e do código.
+
+## Exemplo
+
+Um exemplo do processo:
+
+- `--foo` existe em v1.22.14, v1.23.10 e v1.24.2.
+- Após o lançamento da v1.24.2, decidiu-se descontinuar `--foo` em favor de `--new-foo`.
+- Em v1.22.15, v1.23.11 e v1.24.3, `--foo` continua existindo, mas avisará os usuários:
+  ```
+  [Aviso] --foo será descontinuado em v1.25.0, use --new-foo em vez disso
+  ```
+  `--foo` continuará existindo como um sinalizador operacional durante a vida útil de v1.22, v1.23 e v1.24.
+- Em v1.25.0, `--foo` é marcado como descontinuado na documentação e fica oculto no código. Ele continua funcionando e avisa os usuários para mudarem para `--new-foo`.
+- Na v1.26.0, `--foo` causará um erro fatal se usado. A mensagem de erro dirá:
+  ```
+  [Fatal] exit 1: --foo não é mais suportado, use --new-foo em vez disso
+  ```
+- Na v1.27.0, `--foo` será removido completamente do código e da documentação.
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/resource-profiling.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/resource-profiling.md
new file mode 100644
index 000000000..435c058af
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/reference/resource-profiling.md
@@ -0,0 +1,168 @@
+---
+title: Criação de Perfil de Recursos
+---
+
+Esta seção captura os resultados dos testes realizados para determinar os requisitos de recursos do K3s.
+
+## Requisitos mínimos de recursos para o K3s
+
+Os resultados são resumidos a seguir:
+
+| Componentes | Processador | CPU Mínima | RAM Mínima com Kine/SQLite | RAM Mínima com etcd incorporado |
+| --- | --- | --- | --- | --- |
+| Servidor K3s com uma carga de trabalho | Intel 8375C CPU, 2.90 GHz | 6% de um núcleo | 1596 M | 1606 M |
+| Cluster K3s com um único agente | Intel 8375C CPU, 2.90 GHz | 5% de um núcleo | 1428 M | 1450 M |
+| Agente K3s | Intel 8375C CPU, 2.90 GHz | 3% de um núcleo | 275 M | 275 M |
+| Servidor K3s com uma carga de trabalho | Pi4B BCM2711, 1.50 GHz | 30% de um núcleo | 1588 M | 1613 M |
+| Cluster K3s com um único agente | Pi4B BCM2711, 1.50 GHz | 25% de um núcleo | 1215 M | 1413 M |
+| Agente K3s | Pi4B BCM2711, 1.50 GHz | 10% de um núcleo | 268 M | 268 M |
+
+### Escopo do teste de recursos
+
+Os testes de recursos tinham como objetivo responder às seguintes questões:
+
+- Em um cluster de nó único, determinar a quantidade mínima legítima de CPU, memória e IOPS que deve ser reservada para executar toda a pilha do servidor K3s, assumindo que uma carga de trabalho real será implantada no cluster.
+- Em um nó de agente (trabalhador), determinar a quantidade mínima legítima de CPU, memória e IOPS que deve ser reservada para os componentes do plano de controle do Kubernetes e do K3s (o kubelet e o agente k3s).
+
+### Componentes Incluídos para Medições de Linha de Base
+
+Os componentes testados são:
+
+* K3s v1.26.5 com todos os componentes empacotados habilitados
+* Pilha de monitoramento Prometheus + Grafana
+* [Exemplo de implantação Nginx do Kubernetes](https://kubernetes.io/docs/tasks/run-application/run-stateless-application-deployment/)
+
+Estes são números de linha de base para um sistema estável, usando apenas componentes empacotados do K3s (Traefik Ingress, Klipper lb, armazenamento local-path) e executando uma pilha de monitoramento padrão (Prometheus e Grafana) e o aplicativo de exemplo Guestbook.
+
+Os números de recursos, incluindo IOPS, referem-se apenas ao datastore e ao plano de controle do Kubernetes e não incluem a sobrecarga de agentes de gerenciamento ou de registro em nível de sistema, de gerenciamento de imagens de contêiner ou de quaisquer requisitos específicos da carga de trabalho.
+
+### Metodologia
+
+Uma instância autônoma do Prometheus v2.43.0 foi usada para coletar estatísticas de CPU, memória e E/S de disco do host, usando o `prometheus-node-exporter` instalado via apt.
+
+O `systemd-cgtop` foi usado para verificar a utilização de CPU e memória no nível de cgroup do systemd. `system.slice/k3s.service` rastreia a utilização de recursos do K3s e do containerd, enquanto os pods individuais estão sob a hierarquia `kubepods`.
+
+Dados adicionais detalhados de utilização de memória do K3s foram coletados de `kubectl top node`, usando o metrics-server integrado, para os processos de servidor e agente.
+
+Os números de utilização foram baseados em leituras do 95º percentil da operação em estado estável em nós executando as cargas de trabalho descritas.
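+
+Para reproduzir observações semelhantes em um cluster próprio, um esboço ilustrativo (os nomes de unidade correspondem à instalação padrão do K3s descrita acima; os caminhos de cgroup podem variar conforme o sistema):
+
+```bash
+# Utilização de CPU/memória do K3s e do containerd, no nível de cgroup do systemd
+systemd-cgtop system.slice/k3s.service
+
+# Utilização agregada dos pods (a hierarquia pode aparecer como kubepods ou kubepods.slice, conforme o driver de cgroup)
+systemd-cgtop kubepods
+
+# Utilização por nó, via metrics-server integrado
+kubectl top node
+```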
+
+### Ambiente
+
+| Arch | OS | Sistema | CPU | RAM | Disco |
+| --- | --- | --- | --- | --- | --- |
+| x86_64 | Ubuntu 22.04 | AWS c6id.xlarge | Intel Xeon Platinum 8375C CPU, 4 núcleos, 2.90 GHz | 8 GB | SSD NVMe |
+| aarch64 | Raspberry Pi OS 11 | Raspberry Pi 4 Model B | BCM2711, 4 núcleos, 1.50 GHz | 8 GB | UHS-III SDXC |
+
+### Requisitos de Recursos de Linha de Base
+
+Esta seção captura os resultados dos testes para determinar os requisitos mínimos de recursos para a operação básica do K3s.
+
+#### Servidor K3s com uma Carga de Trabalho
+
+Estes são os requisitos para um cluster de nó único no qual o servidor K3s compartilha recursos com uma [carga de trabalho simples](https://kubernetes.io/docs/tasks/run-application/run-stateless-application-deployment/).
+
+Os requisitos de CPU são:
+
+| Sistema | Uso do Núcleo da CPU |
+| --- | --- |
+| Intel 8375C | 6% de um núcleo |
+| Pi4B | 30% de um núcleo |
+
+Os requisitos de memória são:
+
+| Armazenamento de Dados Testado | Sistema | Memória |
+| --- | --- | --- |
+| Kine/SQLite | Intel 8375C | 1596 M |
+| | Pi4B | 1588 M |
+| etcd Incorporado | Intel 8375C | 1606 M |
+| | Pi4B | 1613 M |
+
+Os requisitos de disco são:
+
+| Armazenamento de Dados Testado | IOPS | KiB/s | Latência |
+| --- | --- | --- | --- |
+| Kine/SQLite | 10 | 500 | < 10 ms |
+| etcd Incorporado | 50 | 250 | < 5 ms |
+
+### Cluster K3s com um Único Agente
+
+Estes são os requisitos de linha de base para um cluster K3s com um nó de servidor K3s e um agente K3s, mas sem carga de trabalho.
+#### Servidor K3s
+Os requisitos de CPU são:
+
+| Sistema | Uso do Núcleo da CPU |
+| --- | --- |
+| Intel 8375C | 5% de um núcleo |
+| Pi4B | 25% de um núcleo |
+
+Os requisitos de memória são:
+
+| Armazenamento de Dados Testado | Sistema | Memória |
+| --- | --- | --- |
+| Kine/SQLite | Intel 8375C | 1428 M |
+| | Pi4B | 1215 M |
+| etcd Incorporado | Intel 8375C | 1450 M |
+| | Pi4B | 1413 M |
+
+#### Agente K3s
+
+Os requisitos são:
+
+| Sistema | Uso do Núcleo da CPU | RAM |
+| --- | --- | --- |
+| Intel 8375C | 3% de um núcleo | 275 M |
+| Pi4B | 5% de um núcleo | 268 M |
+
+### Análise dos principais impulsionadores da utilização de recursos
+
+Os números de utilização do servidor K3s são impulsionados principalmente pelo suporte ao datastore do Kubernetes (kine ou etcd), pelos loops de controle do API Server, do Controller-Manager e do Scheduler, e por quaisquer tarefas de gerenciamento necessárias para efetuar alterações no estado do sistema. Operações que colocam carga adicional no plano de controle do Kubernetes, como criar/modificar/excluir recursos, causam picos temporários de utilização. Usar operadores ou aplicativos que fazem uso extensivo do datastore do Kubernetes (como o Rancher ou outros aplicativos do tipo Operator) aumenta os requisitos de recursos do servidor. Aumentar a escala do cluster, adicionando nós ou criando muitos recursos, também aumenta os requisitos de recursos do servidor.
+
+Os números de utilização do agente K3s são impulsionados principalmente pelos loops de controle de gerenciamento do ciclo de vida dos contêineres. Operações que envolvem gerenciamento de imagens, provisionamento de armazenamento ou criação/destruição de contêineres causam picos temporários de utilização. Os pulls de imagem, em particular, são tipicamente muito limitados por CPU e E/S, pois envolvem a descompactação do conteúdo da imagem para o disco. Se possível, o armazenamento de carga de trabalho (armazenamento efêmero de pods e volumes) deve ser isolado dos componentes do agente (/var/lib/rancher/k3s/agent) para garantir que não haja conflitos de recursos.
+
+### Impedindo que agentes e cargas de trabalho interfiram no datastore do cluster
+
+Ao executar em um ambiente em que o servidor também hospeda pods de carga de trabalho, deve-se tomar cuidado para garantir que o IOPS do agente e das cargas de trabalho não interfira no datastore.
+
+Isso é melhor realizado colocando os componentes do servidor (/var/lib/rancher/k3s/server) em um meio de armazenamento diferente do usado pelos componentes do agente (/var/lib/rancher/k3s/agent), que incluem o armazenamento de imagens do containerd, como no esboço a seguir.
+
+O armazenamento de carga de trabalho (armazenamento efêmero de pods e volumes) também deve ser isolado do datastore.
+
+O não cumprimento dos requisitos de taxa de transferência e latência do datastore pode resultar em resposta atrasada do plano de controle e/ou em falha do plano de controle em manter o estado do sistema.
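+
+Um esboço ilustrativo dessa separação (o dispositivo `/dev/nvme1n1` é hipotético, e o exemplo supõe um disco vazio, antes da primeira inicialização do K3s):
+
+```bash
+# Formata o disco dedicado e monta o datastore do servidor nele
+mkfs.ext4 /dev/nvme1n1
+mkdir -p /var/lib/rancher/k3s/server
+mount /dev/nvme1n1 /var/lib/rancher/k3s/server
+
+# Persiste a montagem entre reinicializações
+echo '/dev/nvme1n1 /var/lib/rancher/k3s/server ext4 defaults 0 2' >> /etc/fstab
+```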
+
+## Requisitos de Dimensionamento do Servidor para o K3s
+
+### Ambiente
+
+- Todos os agentes eram instâncias AWS EC2 t3.medium.
+  - Um único agente era uma instância c5.4xlarge. Ele hospedava a pilha de monitoramento Grafana, impedindo que ela interferisse nos recursos do plano de controle.
+- O servidor era uma instância AWS EC2 c5. Conforme o número de agentes aumentava, o servidor era atualizado para instâncias c5 maiores.
+
+### Metodologia
+
+Esses dados foram obtidos sob condições de teste específicas e variam conforme o ambiente e as cargas de trabalho. As etapas abaixo fornecem uma visão geral do teste executado para obtê-los. A última execução foi na versão v1.31.0+k3s1. Todas as máquinas foram provisionadas na AWS com volumes gp3 padrão de 20 GiB. O teste foi executado com as seguintes etapas:
+1. Monitorar os recursos no Grafana, usando o Prometheus como fonte de dados.
+2. Implantar cargas de trabalho de forma a simular a atividade contínua do cluster:
+    - Uma carga de trabalho básica que aumenta e diminui de escala continuamente
+    - Uma carga de trabalho que é excluída e recriada em um loop
+    - Uma carga de trabalho constante que contém vários outros recursos, incluindo CRDs.
+3. Ingressar os nós de agente em lotes de 50 a 100 por vez.
+4. Parar de adicionar agentes quando a CPU do servidor ultrapassar picos de 90% de utilização durante o ingresso de agentes ou quando a RAM ultrapassar 80% de utilização.
+
+### Observações
+
+- Ao ingressar agentes, a CPU do servidor apresentava picos de ~20% acima da linha de base.
+- Normalmente, o fator limitante era a CPU, não a RAM. Na maioria dos testes, quando a CPU atingia 90% de utilização, a utilização da RAM estava em torno de 60%.
+
+#### Uma nota sobre Alta Disponibilidade (HA)
+No final de cada teste, dois servidores adicionais foram ingressados (formando um cluster HA básico de 3 nós) para observar o efeito disso nos recursos do servidor original. O efeito foi:
+  - Uma queda perceptível na utilização da CPU, geralmente de 30-50%.
+  - A utilização da RAM permaneceu a mesma.
+
+Embora não tenha sido testado, com a CPU como fator limitante em um único servidor, espera-se que o número de agentes que podem ser ingressados aumente em ~50% com um cluster HA de 3 nós.
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/related-projects.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/related-projects.md
new file mode 100644
index 000000000..f5f54b798
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/related-projects.md
@@ -0,0 +1,19 @@
+---
+title: "Projetos Relacionados"
+---
+
+Projetos que implementam a distribuição K3s são adições bem-vindas que ajudam a expandir a comunidade. Esta página apresenta uma gama de projetos relacionados ao K3s que podem ajudá-lo a explorar ainda mais suas capacidades e potenciais aplicações.
+
+Esses projetos demonstram a versatilidade e a adaptabilidade do K3s em vários ambientes, bem como extensões do K3s. Todos eles são úteis na criação de clusters Kubernetes de Alta Disponibilidade (HA) em larga escala.
+
+## k3s-ansible
+
+Para usuários que buscam inicializar um cluster K3s com múltiplos nós e que estão familiarizados com o Ansible, vale conferir o repositório [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). Esse conjunto de playbooks do Ansible fornece uma maneira conveniente de instalar o K3s em seus nós, permitindo que você se concentre na configuração do cluster em vez do processo de instalação.
+
+## k3sup
+
+Outro projeto que simplifica a configuração de um cluster K3s é o [k3sup](https://github.com/alexellis/k3sup). Esse projeto, escrito em Go, requer apenas acesso SSH aos seus nós. Ele também fornece uma maneira conveniente de implantar o K3s com datastores externos, e não apenas com o etcd incorporado.
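+
+Um esboço ilustrativo de uso (os IPs e o usuário SSH são hipotéticos; consulte o README do k3sup para as opções completas):
+
+```bash
+# Instala um servidor K3s no host 192.168.0.10 via SSH
+k3sup install --ip 192.168.0.10 --user ubuntu
+
+# Ingressa um agente (192.168.0.11) no servidor recém-criado
+k3sup join --ip 192.168.0.11 --server-ip 192.168.0.10 --user ubuntu
+```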
+ +## autok3s + +Outra ferramenta de provisionamento, [autok3s](https://github.com/cnrancher/autok3s), fornece uma GUI para provisionar cluster k3s em uma variedade de provedores de nuvem, VMs e máquinas locais. Esta ferramenta é útil para usuários que preferem uma interface gráfica para provisionar clusters K3s. \ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.24.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.24.X.md new file mode 100644 index 000000000..338384d4a --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.24.X.md @@ -0,0 +1,533 @@ +--- +hide_table_of_contents: true +sidebar_position: 9 +--- + +# v1.24.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#urgent-upgrade-notes). +::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.24.17+k3s1](v1.24.X.md#release-v12417k3s1) | Sep 05 2023| [v1.24.17](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12417) | [v0.10.2](https://github.com/k3s-io/kine/releases/tag/v0.10.2) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.3-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.3-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.21.3-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.21.3-k3s1.23) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.24.16+k3s1](v1.24.X.md#release-v12416k3s1) | Jul 27 2023| [v1.24.16](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12416) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.21.3-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.21.3-k3s1.23) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.2](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.2) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.24.15+k3s1](v1.24.X.md#release-v12415k3s1) | Jun 26 2023| [v1.24.15](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12415) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | 
[v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.21.3-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.21.3-k3s1.23) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.24.14+k3s1](v1.24.X.md#release-v12414k3s1) | May 26 2023| [v1.24.14](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12414) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.21.3-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.21.3-k3s1.23) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.14.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.14.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.24.13+k3s1](v1.24.X.md#release-v12413k3s1) | Apr 20 2023| [v1.24.13](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12413) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.19-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.19-k3s1) | [v1.1.5](https://github.com/opencontainers/runc/releases/tag/v1.1.5) | [v0.21.3-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.21.3-k3s1.23) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.13.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.3) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.24.12+k3s1](v1.24.X.md#release-v12412k3s1) | Mar 27 2023| [v1.24.12](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12412) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.19-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.19-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.21.3-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.21.3-k3s1.23) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | 
[v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.24.11+k3s1](v1.24.X.md#release-v12411k3s1) | Mar 10 2023| [v1.24.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12411) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.15-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.21.1-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.21.1-k3s1.23) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.24.10+k3s1](v1.24.X.md#release-v12410k3s1) | Jan 26 2023| [v1.24.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v12410) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.15-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.20.2-k3s1.23) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.24.9+k3s2](v1.24.X.md#release-v1249k3s2) | Jan 11 2023| [v1.24.9](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1249) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.14-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.14-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.20.2-k3s1.23) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.24.9+k3s1](v1.24.X.md#release-v1249k3s1) | Dec 20 2022| [v1.24.9](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1249) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.12-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.12-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | 
[v0.20.2-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.20.2-k3s1.23) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.24.8+k3s1](v1.24.X.md#release-v1248k3s1) | Nov 18 2022| [v1.24.8](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1248) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.8-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.8-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.1-k3s1.23](https://github.com/flannel-io/flannel/releases/tag/v0.20.1-k3s1.23) | [v0.6.1](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.1) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.0) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.24.7+k3s1](v1.24.X.md#release-v1247k3s1) | Oct 25 2022| [v1.24.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1247) | [v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.8-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.8-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.19.2](https://github.com/flannel-io/flannel/releases/tag/v0.19.2) | [v0.6.1](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.1) | [v2.9.1](https://github.com/traefik/traefik/releases/tag/v2.9.1) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.24.6+k3s1](v1.24.X.md#release-v1246k3s1) | Sep 28 2022| [v1.24.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1246) | [v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.8-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.8-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.19.2](https://github.com/flannel-io/flannel/releases/tag/v0.19.2) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.24.4+k3s1](v1.24.X.md#release-v1244k3s1) | Aug 25 2022| [v1.24.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1244) | 
[v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.5.13-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.5.13-k3s1) | [v1.1.3](https://github.com/opencontainers/runc/releases/tag/v1.1.3) | [v0.19.1](https://github.com/flannel-io/flannel/releases/tag/v0.19.1) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.24.3+k3s1](v1.24.X.md#release-v1243k3s1) | Jul 19 2022| [v1.24.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1243) | [v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.5.13-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.5.13-k3s1) | [v1.1.3](https://github.com/opencontainers/runc/releases/tag/v1.1.3) | [v0.18.1](https://github.com/flannel-io/flannel/releases/tag/v0.18.1) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.24.2+k3s2](v1.24.X.md#release-v1242k3s2) | Jul 06 2022| [v1.24.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1242) | [v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.5.13-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.5.13-k3s1) | [v1.1.2](https://github.com/opencontainers/runc/releases/tag/v1.1.2) | [v0.18.1](https://github.com/flannel-io/flannel/releases/tag/v0.18.1) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.24.2+k3s1](v1.24.X.md#release-v1242k3s1) | Jun 27 2022| [v1.24.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1242) | [v0.9.1](https://github.com/k3s-io/kine/releases/tag/v0.9.1) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.6-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.6-k3s1) | [v1.1.2](https://github.com/opencontainers/runc/releases/tag/v1.1.2) | [v0.18.1](https://github.com/flannel-io/flannel/releases/tag/v0.18.1) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | 
[v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.24.1+k3s1](v1.24.X.md#release-v1241k3s1) | Jun 11 2022| [v1.24.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#v1241) | [v0.9.1](https://github.com/k3s-io/kine/releases/tag/v0.9.1) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.5.11-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.5.11-k3s1) | [v1.1.1](https://github.com/opencontainers/runc/releases/tag/v1.1.1) | [v0.17.0](https://github.com/flannel-io/flannel/releases/tag/v0.17.0) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.1) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | + +
+ +## Release [v1.24.17+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.17+k3s1) + +This release updates Kubernetes to v1.24.17, and fixes a number of issues. + +:::warning IMPORTANT +This release includes support for remediating CVE-2023-32187, a potential Denial of Service attack vector on K3s servers. See https://github.com/k3s-io/k3s/security/advisories/GHSA-m4hf-6vgr-75r2 for more information, including mandatory steps necessary to harden clusters against this vulnerability. +::: + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v12416). + +### Changes since v1.24.16+k3s1: + +* Update cni plugins version to v1.3.0 [(#8087)](https://github.com/k3s-io/k3s/pull/8087) +* Etcd snapshots retention when node name changes [(#8124)](https://github.com/k3s-io/k3s/pull/8124) +* August Test Backports [(#8128)](https://github.com/k3s-io/k3s/pull/8128) +* Backports for 2023-08 release [(#8135)](https://github.com/k3s-io/k3s/pull/8135) + * K3s's external apiserver listener now declines to add to its certificate any subject names not associated with the kubernetes apiserver service, server nodes, or values of the --tls-san option. This prevents the certificate's SAN list from being filled with unwanted entries. + * K3s no longer enables the apiserver's `enable-aggregator-routing` flag when the egress proxy is not being used to route connections to in-cluster endpoints. + * Updated the embedded containerd to v1.7.3+k3s1 + * Updated the embedded runc to v1.1.8 + * User-provided containerd config templates may now use `{{ template "base" . }}` to include the default K3s template content. This makes it easier to maintain user configuration if the only need is to add additional sections to the file. + * Bump docker/docker module version to fix issues with cri-dockerd caused by recent releases of golang rejecting invalid host headers sent by the docker client. + * Updated kine to v0.10.2 +* K3s etcd-snapshot delete fail to delete local file when called with s3 flag [(#8146)](https://github.com/k3s-io/k3s/pull/8146) +* Fix for cluster-reset backup from s3 when etcd snapshots are disabled [(#8168)](https://github.com/k3s-io/k3s/pull/8168) +* Fixed the etcd retention to delete orphaned snapshots based on the date [(#8191)](https://github.com/k3s-io/k3s/pull/8191) +* Additional backports for 2023-08 release [(#8214)](https://github.com/k3s-io/k3s/pull/8214) + * The version of `helm` used by the bundled helm controller's job image has been updated to v3.12.3 + * Bumped dynamiclistener to address an issue that could cause the apiserver/supervisor listener on 6443 to stop serving requests on etcd-only nodes. + * The K3s external apiserver/supervisor listener on 6443 now sends a complete certificate chain in the TLS handshake. +* Fix runc version bump [(#8243)](https://github.com/k3s-io/k3s/pull/8243) +* Update to v1.24.17 [(#8240)](https://github.com/k3s-io/k3s/pull/8240) +* Add new CLI flag to enable TLS SAN CN filtering [(#8260)](https://github.com/k3s-io/k3s/pull/8260) + * Added a new `--tls-san-security` option. This flag defaults to false, but can be set to true to disable automatically adding SANs to the server's TLS certificate to satisfy any hostname requested by a client. 
+* Add RWMutex to address controller [(#8276)](https://github.com/k3s-io/k3s/pull/8276)
+
+-----
+## Release [v1.24.16+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.16+k3s1)
+
+This release updates Kubernetes to v1.24.16, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v12415).
+
+### Changes since v1.24.15+k3s1:
+
+* Fix code spell check [(#7861)](https://github.com/k3s-io/k3s/pull/7861)
+* Remove file_windows.go [(#7857)](https://github.com/k3s-io/k3s/pull/7857)
+* Allow k3s to customize apiServerPort on helm-controller [(#7872)](https://github.com/k3s-io/k3s/pull/7872)
+* Fix rootless node password [(#7899)](https://github.com/k3s-io/k3s/pull/7899)
+* Backports for 2023-07 release [(#7910)](https://github.com/k3s-io/k3s/pull/7910)
+  * Resolved an issue that caused agents joined with kubeadm-style bootstrap tokens to fail to rejoin the cluster when their node object is deleted.
+  * The `k3s certificate rotate-ca` command now supports the data-dir flag.
+* Adding cli to custom klipper helm image [(#7916)](https://github.com/k3s-io/k3s/pull/7916)
+  * The default helm-controller job image can now be overridden with the --helm-job-image CLI flag
+* Generation of certs and keys for etcd gated if etcd is disabled [(#7946)](https://github.com/k3s-io/k3s/pull/7946)
+* Don't use zgrep in `check-config` if apparmor profile is enforced [(#7955)](https://github.com/k3s-io/k3s/pull/7955)
+* Fix image_scan.sh script and download trivy version (#7950) [(#7970)](https://github.com/k3s-io/k3s/pull/7970)
+* Adjust default kubeconfig file permissions [(#7985)](https://github.com/k3s-io/k3s/pull/7985)
+* Update to v1.24.16 [(#8023)](https://github.com/k3s-io/k3s/pull/8023)
+
+-----
+## Release [v1.24.15+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.15+k3s1)
+
+This release updates Kubernetes to v1.24.15, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v12414).
+
+### Changes since v1.24.14+k3s1:
+
+* E2E Backports - June [(#7726)](https://github.com/k3s-io/k3s/pull/7726)
+  * Shortcircuit commands with version or help flags #7683
+  * Add Rotation certification Check, remove func to restart agents #7097
+  * E2E: Sudo for RunCmdOnNode #7686
+* Fix spelling check [(#7753)](https://github.com/k3s-io/k3s/pull/7753)
+* Backport version bumps and bugfixes [(#7719)](https://github.com/k3s-io/k3s/pull/7719)
+  * The bundled metrics-server has been bumped to v0.6.3, and now uses only secure TLS ciphers by default.
+  * The `coredns-custom` ConfigMap now allows for `*.override` sections to be included in the `.:53` default server block.
+  * The K3s core controllers (supervisor, deploy, and helm) no longer use the admin kubeconfig. This makes it easier to determine from access and audit logs which actions are performed by the system, and which are performed by an administrative user.
+  * Bumped klipper-lb image to v0.4.4 to resolve an issue that prevented access to ServiceLB ports from localhost when the Service ExternalTrafficPolicy was set to Local.
+  * Make LB image configurable when compiling k3s
+  * K3s now allows nodes to join the cluster even if the node password secret cannot be created at the time the node joins. The secret create will be retried in the background.
This resolves a potential deadlock created by fail-closed validating webhooks that block secret creation, where the webhook is unavailable until new nodes join the cluster to run the webhook pod.
+  * The bundled containerd's aufs/devmapper/zfs snapshotter plugins have been restored. These were unintentionally omitted when moving containerd back into the k3s multicall binary in the previous release.
+  * The embedded helm controller has been bumped to v0.15.0, and now supports creating the chart's target namespace if it does not exist.
+* Remove unused libvirt config [(#7759)](https://github.com/k3s-io/k3s/pull/7759)
+* Add format command on Makefile [(#7764)](https://github.com/k3s-io/k3s/pull/7764)
+* Update Kubernetes to v1.24.15 [(#7785)](https://github.com/k3s-io/k3s/pull/7785)
+
+-----
+## Release [v1.24.14+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.14+k3s1)
+
+This release updates Kubernetes to v1.24.14, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v12413).
+
+### Changes since v1.24.13+k3s1:
+
+* Add E2E testing in Drone [(#7376)](https://github.com/k3s-io/k3s/pull/7376)
+* Add integration tests for etc-snapshot server flags [(#7379)](https://github.com/k3s-io/k3s/pull/7379)
+* CLI + Config Enhancement [(#7407)](https://github.com/k3s-io/k3s/pull/7407)
+  * `--Tls-sans` now accepts multiple arguments: `--tls-sans="foo,bar"`
+  * `Prefer-bundled-bin: true` now works properly when set in `config.yaml.d` files
+* Migrate netutil methods into /utils/net.go [(#7435)](https://github.com/k3s-io/k3s/pull/7435)
+* Bump Runc + Containerd + Docker for CVE fixes [(#7453)](https://github.com/k3s-io/k3s/pull/7453)
+* Bump kube-router version to fix a bug when a port name is used [(#7462)](https://github.com/k3s-io/k3s/pull/7462)
+* Kube flags and longhorn tests 1.24 [(#7467)](https://github.com/k3s-io/k3s/pull/7467)
+* Local-storage: Fix permission [(#7472)](https://github.com/k3s-io/k3s/pull/7472)
+* Backport version bumps and bugfixes [(#7516)](https://github.com/k3s-io/k3s/pull/7516)
+  * K3s now retries the cluster join operation when receiving a "too many learners" error from etcd. This most frequently occurred when attempting to add multiple servers at the same time.
+  * K3s once again supports aarch64 nodes with page size > 4k
+  * The packaged Traefik version has been bumped to v2.9.10 / chart 21.2.0
+  * K3s now prints a more meaningful error when attempting to run from a filesystem mounted `noexec`.
+  * K3s now exits with a proper error message when the server token uses a bootstrap token `id.secret` format.
+  * Fixed an issue where Addon, HelmChart, and HelmChartConfig CRDs were created without structural schema, allowing the creation of custom resources of these types with invalid content.
+  * Servers started with the (experimental) --disable-agent flag no longer attempt to run the tunnel authorizer agent component.
+  * Fixed a regression that prevented the pod and cluster egress-selector modes from working properly.
+  * K3s now correctly passes through etcd-args to the temporary etcd that is used to extract cluster bootstrap data when restarting managed etcd nodes.
+  * K3s now properly handles errors obtaining the current etcd cluster member list when a new server is joining the managed etcd cluster.
+  * The embedded kine version has been bumped to v0.10.1. This replaces the legacy `lib/pq` postgres driver with `pgx`.
+ * The bundled CNI plugins have been upgraded to v1.2.0-k3s1. The bandwidth and firewall plugins are now included in the bundle. + * The embedded Helm controller now supports authenticating to chart repositories via credentials stored in a Secret, as well as passing repo CAs via ConfigMap. +* Bump containerd/runc to v1.7.1-k3s1/v1.1.7 [(#7536)](https://github.com/k3s-io/k3s/pull/7536) + * The bundled containerd and runc versions have been bumped to v1.7.1-k3s1/v1.1.7 +* Wrap error stating that it is coming from netpol [(#7549)](https://github.com/k3s-io/k3s/pull/7549) +* Update to v1.24.14-k3s1 [(#7577)](https://github.com/k3s-io/k3s/pull/7577) + +----- +## Release [v1.24.13+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.13+k3s1) + +This release updates Kubernetes to v1.24.13, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v12412). + +### Changes since v1.24.12+k3s1: + +* Enhance `check-config` [(#7165)](https://github.com/k3s-io/k3s/pull/7165) +* Remove deprecated nodeSelector label beta.kubernetes.io/os (#6970) [(#7122)](https://github.com/k3s-io/k3s/pull/7122) +* Backport version bumps and bugfixes [(#7229)](https://github.com/k3s-io/k3s/pull/7229) + * The bundled local-path-provisioner version has been bumped to v0.0.24 + * The bundled runc version has been bumped to v1.1.5 + * The bundled coredns version has been bumped to v1.10.1 + * When using an external datastore, K3s now locks the bootstrap key while creating initial cluster bootstrap data, preventing a race condition when multiple servers attempted to initialize the cluster simultaneously. + * The client load-balancer that maintains connections to active server nodes now closes connections to servers when they are removed from the cluster. This ensures that agent components immediately reconnect to a current cluster member. + * Fixed a race condition during cluster reset that could cause the operation to hang and time out. +* Updated kube-router to move the default ACCEPT rule at the end of the chain [(#7222)](https://github.com/k3s-io/k3s/pull/7222) + * The embedded kube-router controller has been updated to fix a regression that caused traffic from pods to be blocked by any default drop/deny rules present on the host. Users should still confirm that any externally-managed firewall rules explicitly allow traffic to/from pod and service networks, but this returns the old behavior that was relied upon by some users. +* Update klipper lb and helm-controller [(#7241)](https://github.com/k3s-io/k3s/pull/7241) +* Update Kube-router ACCEPT rule insertion and install script to clean rules before start [(#7277)](https://github.com/k3s-io/k3s/pull/7277) + * The embedded kube-router controller has been updated to fix a regression that caused traffic from pods to be blocked by any default drop/deny rules present on the host. Users should still confirm that any externally-managed firewall rules explicitly allow traffic to/from pod and service networks, but this returns the old behavior that was relied upon by some users. +* Update to v1.24.13-k3s1 [(#7284)](https://github.com/k3s-io/k3s/pull/7284) + +----- +## Release [v1.24.12+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.12+k3s1) + +This release updates Kubernetes to v1.24.12, and fixes a number of issues. 
+ +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v12411). + +### Changes since v1.24.11+k3s1: + +* Update flannel and kube-router [(#7063)](https://github.com/k3s-io/k3s/pull/7063) +* Bump various dependencies for CVEs [(#7042)](https://github.com/k3s-io/k3s/pull/7042) +* Enable dependabot [(#7046)](https://github.com/k3s-io/k3s/pull/7046) +* Wait for kubelet port to be ready before setting [(#7065)](https://github.com/k3s-io/k3s/pull/7065) + * The agent tunnel authorizer now waits for the kubelet to be ready before reading the kubelet port from the node object. +* Improve support for rotating the default self-signed certs [(#7080)](https://github.com/k3s-io/k3s/pull/7080) + * The `k3s certificate rotate-ca` checks now support rotating self-signed certificates without the `--force` option. +* Adds a warning about editing to the containerd config.toml file [(#7076)](https://github.com/k3s-io/k3s/pull/7076) +* Update to v1.24.12-k3s1 [(#7105)](https://github.com/k3s-io/k3s/pull/7105) + +----- +## Release [v1.24.11+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.11+k3s1) + +This release updates Kubernetes to v1.24.11, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v12410). + +### Changes since v1.24.10+k3s1: + +* Add jitter to scheduled snapshots and retry harder on conflicts [(#6783)](https://github.com/k3s-io/k3s/pull/6783) + * Scheduled etcd snapshots are now offset by a short random delay of up to several seconds. This should prevent multi-server clusters from executing pathological behavior when attempting to simultaneously update the snapshot list ConfigMap. The snapshot controller will also be more persistent in attempting to update the snapshot list. +* Bump cri-dockerd [(#6799)](https://github.com/k3s-io/k3s/pull/6799) + * The embedded cri-dockerd has been updated to v0.3.1 +* Bugfix: do not break cert-manager when pprof is enabled [(#6838)](https://github.com/k3s-io/k3s/pull/6838) +* Bump vagrant boxes to fedora37 [(#6859)](https://github.com/k3s-io/k3s/pull/6859) +* Fix cronjob example [(#6865)](https://github.com/k3s-io/k3s/pull/6865) +* Ensure flag type consistency [(#6868)](https://github.com/k3s-io/k3s/pull/6868) +* Wait for cri-dockerd socket [(#6854)](https://github.com/k3s-io/k3s/pull/6854) +* Consolidate E2E tests [(#6888)](https://github.com/k3s-io/k3s/pull/6888) +* Ignore value conflicts when reencrypting secrets [(#6918)](https://github.com/k3s-io/k3s/pull/6918) +* Allow ServiceLB to honor `ExternalTrafficPolicy=Local` [(#6908)](https://github.com/k3s-io/k3s/pull/6908) + * ServiceLB now honors the Service's ExternalTrafficPolicy. When set to Local, the LoadBalancer will only advertise addresses of Nodes with a Pod for the Service, and will not forward traffic to other cluster members. +* Use default address family when adding kubernetes service address to SAN list [(#6905)](https://github.com/k3s-io/k3s/pull/6905) + * The apiserver advertised address and IP SAN entry are now set correctly on clusters that use IPv6 as the default IP family. 
+* Fix issue with servicelb startup failure when validating webhooks block creation [(#6920)](https://github.com/k3s-io/k3s/pull/6920)
+  * The embedded cloud controller manager will no longer attempt to unconditionally re-create its namespace and serviceaccount on startup. This resolves an issue that could cause a deadlocked cluster when fail-closed webhooks are in use.
+* Backport user-provided CA cert and `kubeadm` bootstrap token support [(#6930)](https://github.com/k3s-io/k3s/pull/6930)
+  * K3s now functions properly when the cluster CA certificates are signed by an existing root or intermediate CA. You can find a sample script for generating such certificates before K3s starts in the github repo at [contrib/util/certs.sh](https://github.com/k3s-io/k3s/blob/master/contrib/util/certs.sh).
+  * K3s now supports `kubeadm` style join tokens. `k3s token create` now creates join token secrets, optionally with a limited TTL.
+  * K3s agents joined with an expired or deleted token stay in the cluster using existing client certificates via the NodeAuthorization admission plugin, unless their Node object is deleted from the cluster.
+* Fix access to hostNetwork port on NodeIP when egress-selector-mode=agent [(#6937)](https://github.com/k3s-io/k3s/pull/6937)
+  * Fixed an issue that would cause the apiserver egress proxy to attempt to use the agent tunnel to connect to service endpoints even in agent or disabled mode.
+* Update flannel to v0.21.1 [(#6925)](https://github.com/k3s-io/k3s/pull/6925)
+* Allow for multiple sets of leader-elected controllers [(#6942)](https://github.com/k3s-io/k3s/pull/6942)
+  * Fixed an issue where leader-elected controllers for managed etcd did not run on etcd-only nodes
+* Fix etcd and ca-cert rotate issues [(#6955)](https://github.com/k3s-io/k3s/pull/6955)
+* Fix ServiceLB dual-stack ingress IP listing [(#6988)](https://github.com/k3s-io/k3s/pull/6988)
+  * Resolved an issue with ServiceLB that would cause it to advertise node IPv6 addresses, even if the cluster or service was not enabled for dual-stack operation.
+* Bump kine to v0.9.9 [(#6976)](https://github.com/k3s-io/k3s/pull/6976)
+  * The embedded kine version has been bumped to v0.9.9. Compaction log messages are now emitted at `info` level for increased visibility.
+* Update to v1.24.11-k3s1 [(#7009)](https://github.com/k3s-io/k3s/pull/7009)
+
+-----
+## Release [v1.24.10+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.10+k3s1)
+
+
+This release updates Kubernetes to v1.24.10, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1249).
+
+### Changes since v1.24.9+k3s2:
+
+* Pass through default tls-cipher-suites [(#6731)](https://github.com/k3s-io/k3s/pull/6731)
+  * The K3s default cipher suites are now explicitly passed in to kube-apiserver, ensuring that all listeners use these values.
+* Bump containerd to v1.6.15-k3s1 [(#6736)](https://github.com/k3s-io/k3s/pull/6736)
+  * The embedded containerd version has been bumped to v1.6.15-k3s1
+* Bump action/download-artifact to v3 [(#6748)](https://github.com/k3s-io/k3s/pull/6748)
+
+-----
+## Release [v1.24.9+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.24.9+k3s2)
+
+
+This release updates containerd to v1.6.14 to resolve an issue where pods would lose their CNI information when containerd was restarted.
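+
+A quick way to confirm the fix on a node, assuming a systemd-managed install (the pod sandbox IDs shown by `crictl` should remain stable across the restart):
+
+```bash
+# List pod sandboxes before restarting K3s (which also restarts the
+# embedded containerd).
+sudo k3s crictl pods
+
+# Restart the K3s service; with the containerd fix in place, existing
+# pods keep their CNI info and are not recreated by the kubelet.
+sudo systemctl restart k3s
+
+# The same sandbox IDs should still be listed afterwards.
+sudo k3s crictl pods
+```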
+
+### Changes since v1.24.9+k3s1:
+
+* Backport missing E2E test commits [(#6616)](https://github.com/k3s-io/k3s/pull/6616)
+* Bump containerd to v1.6.14-k3s1 [(#6695)](https://github.com/k3s-io/k3s/pull/6695)
+  * The embedded containerd version has been bumped to v1.6.14-k3s1. This includes a backported fix for [containerd/7843](https://github.com/containerd/containerd/issues/7843) which caused pods to lose their CNI info when containerd was restarted, which in turn caused the kubelet to recreate the pod.
+
+-----
+## Release [v1.24.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.9+k3s1)
+
+
+> ## ⚠️ WARNING
+> This release is affected by https://github.com/containerd/containerd/issues/7843, which causes the kubelet to restart all pods whenever K3s is restarted. For this reason, we have removed this K3s release from the channel server. Please use `v1.24.9+k3s2` instead.
+
+This release updates Kubernetes to v1.24.9, and fixes a number of issues.
+
+**Breaking Change:** K3s no longer includes `swanctl` and `charon` binaries. If you are using the ipsec flannel backend, please ensure that the strongswan `swanctl` and `charon` packages are installed on your node before upgrading K3s to this release.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1248).
+
+### Changes since v1.24.8+k3s1:
+
+* Remove stuff which belongs in the windows executor implementation [(#6502)](https://github.com/k3s-io/k3s/pull/6502)
+* Github CI Updates [(#6535)](https://github.com/k3s-io/k3s/pull/6535)
+* Fix log for flannelExternalIP use case [(#6540)](https://github.com/k3s-io/k3s/pull/6540)
+* Switch from Google Buckets to AWS S3 Buckets [(#6570)](https://github.com/k3s-io/k3s/pull/6570)
+* Change secrets-encryption flag to GA [(#6591)](https://github.com/k3s-io/k3s/pull/6591)
+* Update flannel to v0.20.2 [(#6589)](https://github.com/k3s-io/k3s/pull/6589)
+* Backports for 2022-12 [(#6599)](https://github.com/k3s-io/k3s/pull/6599)
+  * Added a new prefer-bundled-bin flag, which forces K3s to use its bundled binaries rather than the host tools
+  * The embedded containerd version has been updated to v1.6.10-k3s1
+  * The rootless `port-driver`, `cidr`, `mtu`, `enable-ipv6`, and `disable-host-loopback` settings can now be configured via environment variables.
+  * The embedded Load-Balancer controller image has been bumped to klipper-lb:v0.4.0, which includes support for the [LoadBalancerSourceRanges](https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#:~:text=loadBalancerSourceRanges) field.
+  * The embedded Helm controller image has been bumped to klipper-helm:v0.7.4-build20221121
+  * The embedded cloud-controller-manager's metrics listener on port 10258 is now disabled when the `--disable-cloud-controller` flag is set.
+  * Deployments for K3s packaged components now have consistent upgrade strategy and revisionHistoryLimit settings, and will not override scaling decisions by hardcoding the replica count.
+  * The packaged metrics-server has been bumped to v0.6.2
+  * The embedded k3s-root version has been bumped to v0.12.0, based on buildroot 2022.08.1.
+  * The embedded swanctl and charon binaries have been removed. If you are using the ipsec flannel backend, please ensure that the strongswan `swanctl` and `charon` packages are installed on your node before upgrading k3s.
+* Update node12->node16 based GH actions [(#6595)](https://github.com/k3s-io/k3s/pull/6595) +* Update to v1.24.9-k3s1 [(#6623)](https://github.com/k3s-io/k3s/pull/6623) +* Bump containerd to v1.6.12-k3s1 [(#6630)](https://github.com/k3s-io/k3s/pull/6630) + * The embedded containerd version has been bumped to v1.6.12 +* Preload iptable_filter/ip6table_filter [(#6647)](https://github.com/k3s-io/k3s/pull/6647) + +----- +## Release [v1.24.8+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.8+k3s1) + +This release updates Kubernetes to v1.24.8, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1247). + +### Changes since v1.24.7+k3s1: + +* Add the gateway parameter in netplan [(#6341)](https://github.com/k3s-io/k3s/pull/6341) +* Add a netpol test for podSelector & ingress type [(#6348)](https://github.com/k3s-io/k3s/pull/6348) +* Upgrade kube-router to v1.5.1 [(#6356)](https://github.com/k3s-io/k3s/pull/6356) +* Bump install tests OS images [(#6379)](https://github.com/k3s-io/k3s/pull/6379) +* Add test for node-external-ip config parameter [(#6363)](https://github.com/k3s-io/k3s/pull/6363) +* Update Flannel to v0.20.1 [(#6418)](https://github.com/k3s-io/k3s/pull/6418) +* Backports for 2022-11 + * The packaged traefik helm chart has been bumped to v19.0.0, enabling ingressclass support by default. + * The packaged local-path-provisioner has been bumped to v0.0.23 + * The packaged coredns has been bumped to v1.9.4 + * Fix incorrect defer usage + * The bundled traefik has been updated to v2.9.4 / helm chart v18.3.0 + * Use debugger-friendly compile settings if debug is set + * Add test for node-external-ip config parameter + * Convert containerd config.toml.tmpl linux template to v2 syntax + * Replace fedora-coreos with fedora 36 for install tests + * Fixed an issue that would prevent the deploy controller from handling manifests that include resource types that are no longer supported by the apiserver. + * The embedded helm controller has been bumped to v0.13.0 + * The bundled traefik helm chart has been updated to v18.0.0 + * Add hardened cluster and upgrade tests + * Bump kine to v0.9.6 / sqlite3 v3.39.2 ([cve-2022-35737](https://nvd.nist.gov/vuln/detail/cve-2022-35737)) + * Bumped dynamiclistener library to v0.3.5 [(#6411)](https://github.com/k3s-io/k3s/pull/6411) +* Add some helping logs to avoid wrong configs [(#6432)](https://github.com/k3s-io/k3s/pull/6432) +* Change the priority of address types depending on flannel-external-ip [(#6434)](https://github.com/k3s-io/k3s/pull/6434) +* log kube-router version when starting netpol controller [(#6439)](https://github.com/k3s-io/k3s/pull/6439) +* K3s now indicates specifically which cluster-level configuration flags are out of sync when critical configuration differs between server nodes. [(#6446)](https://github.com/k3s-io/k3s/pull/6446) +* Pull traefik helm chart directly from GH [(#6469)](https://github.com/k3s-io/k3s/pull/6469) +* Update to v1.24.8 [(#6479)](https://github.com/k3s-io/k3s/pull/6479) +* The packaged traefik helm chart has been bumped to 19.0.4 [(#6495)](https://github.com/k3s-io/k3s/pull/6495) +* Move traefik chart repo again [(#6509)](https://github.com/k3s-io/k3s/pull/6509) + +----- +## Release [v1.24.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.7+k3s1) + +This release updates Kubernetes to v1.24.7, and fixes a number of issues. 
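+
+As a sketch of the module preloading noted in the v1.24.9+k3s1 changes above (on releases without the change, the same modules can be loaded manually before starting the service):
+
+```bash
+# Load the iptables filter-table modules that K3s now preloads itself.
+sudo modprobe iptable_filter
+sudo modprobe ip6table_filter
+
+# Confirm that the modules are present.
+lsmod | grep -E 'ip6?table_filter'
+```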
+
+The K3s [CIS Hardening Guide](https://docs.k3s.io/security/hardening-guide) has been updated to include configuration changes required to support embedding ServiceLB in the cloud controller manager. If you have followed the hardening guide, please update your policies and RBAC accordingly.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1246).
+
+### Changes since v1.24.6+k3s1:
+
+* Add flannel-external-ip when there is a k3s node-external-ip [(#6189)](https://github.com/k3s-io/k3s/pull/6189)
+* Backports for 2022-10 [(#6227)](https://github.com/k3s-io/k3s/pull/6227)
+  * The embedded metrics-server version has been bumped to v0.6.1
+  * The ServiceLB (klipper-lb) service controller is now integrated into the K3s stub cloud controller manager.
+  * Events recorded to the cluster by embedded controllers are now properly formatted in the service logs.
+  * Fixed an issue with the apiserver network proxy that caused `kubectl exec` to occasionally fail with `error dialing backend: EOF`
+  * Fixed an issue with the apiserver network proxy that caused `kubectl exec` and `kubectl logs` to fail when a custom kubelet port was used, and the custom port was blocked by firewall or security group rules.
+  * The embedded Traefik version has been bumped to v2.9.1 / chart 12.0.0
+* Replace deprecated ioutil package [(#6235)](https://github.com/k3s-io/k3s/pull/6235)
+* Fix dualStack test [(#6250)](https://github.com/k3s-io/k3s/pull/6250)
+* Update to v1.24.7-k3s1 [(#6270)](https://github.com/k3s-io/k3s/pull/6270)
+* Add ServiceAccount for svclb pods [(#6276)](https://github.com/k3s-io/k3s/pull/6276)
+* Return ProviderID in URI format [(#6287)](https://github.com/k3s-io/k3s/pull/6287)
+* Corrected CCM RBAC to allow for removal of legacy service finalizer during upgrades. [(#6307)](https://github.com/k3s-io/k3s/pull/6307)
+* Added a new --flannel-external-ip flag. [(#6322)](https://github.com/k3s-io/k3s/pull/6322)
+  * When enabled, Flannel traffic will now use the nodes' external IPs instead of their internal IPs.
+  * This is meant for use with distributed clusters that are not all on the same local network.
+
+-----
+## Release [v1.24.6+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.6+k3s1)
+
+This release updates Kubernetes to v1.24.6, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1244).
+
+### Changes since v1.24.4+k3s1:
+
+* Remove `--containerd` flag from windows kubelet args [(#6028)](https://github.com/k3s-io/k3s/pull/6028)
+* Mark v1.24.4+k3s1 as stable [(#6036)](https://github.com/k3s-io/k3s/pull/6036)
+* E2E: Add support for CentOS 7 and Rocky 8 [(#6015)](https://github.com/k3s-io/k3s/pull/6015)
+* Convert install tests to run PR build of k3s [(#6003)](https://github.com/k3s-io/k3s/pull/6003)
+* CI: update Fedora 34 -> 35 [(#5996)](https://github.com/k3s-io/k3s/pull/5996)
+* Fix dualStack test and change ipv6 network prefix [(#6023)](https://github.com/k3s-io/k3s/pull/6023)
+* Fix e2e tests [(#6018)](https://github.com/k3s-io/k3s/pull/6018)
+* Update Flannel version to fix older iptables version issue. 
[(#6088)](https://github.com/k3s-io/k3s/pull/6088)
+* The bundled version of runc has been bumped to v1.1.4 [(#6072)](https://github.com/k3s-io/k3s/pull/6072)
+* The embedded containerd version has been bumped to v1.6.8-k3s1 [(#6079)](https://github.com/k3s-io/k3s/pull/6079)
+* Bulk Backport of Testing Changes [(#6085)](https://github.com/k3s-io/k3s/pull/6085)
+* Add validation check to confirm correct golang version for Kubernetes [(#6113)](https://github.com/k3s-io/k3s/pull/6113)
+* Update to v1.24.5 [(#6143)](https://github.com/k3s-io/k3s/pull/6143)
+* Update to v1.24.6-k3s1 [(#6164)](https://github.com/k3s-io/k3s/pull/6164)
+
+-----
+## Release [v1.24.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.4+k3s1)
+
+This release updates Kubernetes to v1.24.4, and fixes a number of issues.
+
+This release restores use of the `--docker` flag to the v1.24 branch. See [docs/adrs/cri-dockerd.md](https://github.com/k3s-io/k3s/blob/master/docs/adrs/cri-dockerd.md) for more information.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1243).
+
+### Changes since v1.24.3+k3s1:
+
+* Put the terraform tests into their own packages and cleanup the test runs [(#5861)](https://github.com/k3s-io/k3s/pull/5861)
+* Bumped rootlesskit to v1.0.1 [(#5773)](https://github.com/k3s-io/k3s/pull/5773)
+* The initial health-check time for the etcd datastore has been raised from 10 to 30 seconds. [(#5882)](https://github.com/k3s-io/k3s/pull/5882)
+* Fixed a regression that caused systemd cgroup driver autoconfiguration to fail on server nodes. [(#5851)](https://github.com/k3s-io/k3s/pull/5851)
+* The embedded network policy controller has been updated to kube-router v1.5.0 [(#5789)](https://github.com/k3s-io/k3s/pull/5789)
+* The configured service CIDR is now passed to the Kubernetes controller-manager via the `--service-cluster-ip-range` flag. Previously this value was only passed to the apiserver. [(#5894)](https://github.com/k3s-io/k3s/pull/5894)
+* Updated dynamiclistener to fix a regression that prevented certificate renewal from working properly. [(#5896)](https://github.com/k3s-io/k3s/pull/5896)
+* Promote v1.24.3+k3s1 to stable [(#5889)](https://github.com/k3s-io/k3s/pull/5889)
+* ADR: Deprecating and Removing Old Flags [(#5890)](https://github.com/k3s-io/k3s/pull/5890)
+* K3s no longer sets containerd's `enable_unprivileged_icmp` and `enable_unprivileged_ports` options on kernels that do not support them. [(#5913)](https://github.com/k3s-io/k3s/pull/5913)
+* The etcd error on incorrect peer URLs now correctly includes the expected https scheme and port 2380. [(#5909)](https://github.com/k3s-io/k3s/pull/5909)
+* When set, the agent-token value is now written to `$datadir/server/agent-token`, in the same manner as the default (server) token is written to `$datadir/server/token` [(#5906)](https://github.com/k3s-io/k3s/pull/5906)
+* Deprecated flags now warn of their v1.25 removal [(#5937)](https://github.com/k3s-io/k3s/pull/5937)
+* Fix secrets reencryption for clusters with 8K+ secrets [(#5936)](https://github.com/k3s-io/k3s/pull/5936)
+* Bumped minio-go to v7.0.33. This adds support for IMDSv2 credentials. 
[(#5928)](https://github.com/k3s-io/k3s/pull/5928)
+* Upgrade GH Actions macos-10.15 to macos-12 [(#5953)](https://github.com/k3s-io/k3s/pull/5953)
+* Added dualstack IP auto detection [(#5920)](https://github.com/k3s-io/k3s/pull/5920)
+* The `--docker` flag has been restored to k3s, as a shortcut to enabling embedded cri-dockerd [(#5916)](https://github.com/k3s-io/k3s/pull/5916)
+* Update MAINTAINERS with new folks and departures [(#5948)](https://github.com/k3s-io/k3s/pull/5948)
+* Removing checkbox indicating backports [(#5947)](https://github.com/k3s-io/k3s/pull/5947)
+* fix checkError in terraform/testutils [(#5893)](https://github.com/k3s-io/k3s/pull/5893)
+* Add scripts to run e2e test using ansible [(#5134)](https://github.com/k3s-io/k3s/pull/5134)
+* Updated flannel to v0.19.1 [(#5962)](https://github.com/k3s-io/k3s/pull/5962)
+* Update run scripts [(#5979)](https://github.com/k3s-io/k3s/pull/5979)
+* Convert install/cgroup tests to yaml based config [(#5992)](https://github.com/k3s-io/k3s/pull/5992)
+* E2E: Local cluster testing [(#5977)](https://github.com/k3s-io/k3s/pull/5977)
+* Add nightly install github action [(#5998)](https://github.com/k3s-io/k3s/pull/5998)
+* Convert codespell from Drone to GH actions [(#6004)](https://github.com/k3s-io/k3s/pull/6004)
+* Update to v1.24.4 [(#6014)](https://github.com/k3s-io/k3s/pull/6014)
+
+-----
+## Release [v1.24.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.3+k3s1)
+
+This release updates Kubernetes to v1.24.3, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1242).
+
+### Changes since v1.24.2+k3s2:
+* Updated rancher/remotedialer to address a potential memory leak. [(#5784)](https://github.com/k3s-io/k3s/pull/5784)
+* The embedded runc binary has been bumped to v1.1.3 [(#5783)](https://github.com/k3s-io/k3s/pull/5783)
+* Fixed a regression that caused some containerd labels to be empty in cadvisor pod metrics [(#5812)](https://github.com/k3s-io/k3s/pull/5812)
+* Replace dapper testing with regular docker [(#5805)](https://github.com/k3s-io/k3s/pull/5805)
+* Promote v1.23.8+k3s2 to stable [(#5814)](https://github.com/k3s-io/k3s/pull/5814)
+* Fixed an issue that would cause etcd restore to fail when restoring a snapshot made with secrets encryption enabled if the `--secrets-encryption` flag was not included in the config file or restore command. [(#5817)](https://github.com/k3s-io/k3s/pull/5817)
+* Fix deletion of svclb DaemonSet when Service is deleted [(#5824)](https://github.com/k3s-io/k3s/pull/5824)
+  * Fixed a regression that caused ServiceLB DaemonSets to remain present after their corresponding Services were deleted. Manual cleanup of orphaned `svclb-*` DaemonSets from the `kube-system` namespace may be necessary if any LoadBalancer Services were deleted while running an affected release.
+* Address issues with etcd snapshots
+  * Scheduled etcd snapshots are now compressed when snapshot compression is enabled.
+  * The default etcd snapshot timeout has been raised to 5 minutes. Only one scheduled etcd snapshot will run at a time. If another snapshot would occur while the previous snapshot is still in progress, an error will be logged and the second scheduled snapshot will be skipped.
+  * S3 objects for etcd snapshots are now labeled with the correct content-type when compression is not enabled. 
[(#5833)](https://github.com/k3s-io/k3s/pull/5833)
+* Update to v1.24.3 [(#5870)](https://github.com/k3s-io/k3s/pull/5870)
+
+-----
+## Release [v1.24.2+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.24.2+k3s2)
+
+This release fixes several issues in v1.24.2+k3s1 and prior releases.
+
+### Changes since v1.24.2+k3s1:
+
+* Bumped kine to fix an issue where namespaced lists that included a field-selector on metadata.name would fail to return results when using a SQL storage backend. ([#5795](https://github.com/k3s-io/k3s/pull/5795))
+* K3s will no longer log panics after upgrading directly from much older kubernetes releases, or when deploying services with `type: externalname`. ([#5771](https://github.com/k3s-io/k3s/pull/5771))
+* Fixed an issue that prevented `kubectl logs` and other functionality that requires a connection to the agent from working correctly when the server's `--bind-address` flag was used, or when k3s is used behind an HTTP proxy. ([#5780](https://github.com/k3s-io/k3s/pull/5780))
+* Fixed an issue that prevented newer versions of k3s from joining clusters that do not have egress-selector-mode support. ([#5785](https://github.com/k3s-io/k3s/pull/5785))
+* Remove go-powershell dead dependency ([#5777](https://github.com/k3s-io/k3s/pull/5777))
+
+-----
+## Release [v1.24.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.2+k3s1)
+
+This release updates Kubernetes to v1.24.2, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1241).
+
+### Changes since v1.24.1+k3s1:
+
+* Remove kube-ipvs0 interface when cleaning up [(#5644)](https://github.com/k3s-io/k3s/pull/5644)
+* The `--flannel-wireguard-mode` switch was added to the K3s CLI to configure the wireguard tunnel mode with the wireguard-native backend [(#5552)](https://github.com/k3s-io/k3s/pull/5552)
+* Introduce the `flannel-cni-conf` flag to set the desired flannel CNI configuration [(#5656)](https://github.com/k3s-io/k3s/pull/5656)
+* Integration Test: Startup [(#5630)](https://github.com/k3s-io/k3s/pull/5630)
+* E2E Improvements and groundwork for test-pad tool [(#5593)](https://github.com/k3s-io/k3s/pull/5593)
+* Update SECURITY.md [(#5607)](https://github.com/k3s-io/k3s/pull/5607)
+* Introduce --enable-pprof flag to optionally run pprof server [(#5527)](https://github.com/k3s-io/k3s/pull/5527)
+* E2E: Dualstack test [(#5617)](https://github.com/k3s-io/k3s/pull/5617)
+* Pods created by ServiceLB are now all placed in the `kube-system` namespace, instead of in the same namespace as the Service. This allows for [enforcing Pod Security Standards](https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/) in user namespaces without breaking ServiceLB. [(#5657)](https://github.com/k3s-io/k3s/pull/5657)
+* E2E: testpad prep, add alternate scripts location [(#5692)](https://github.com/k3s-io/k3s/pull/5692)
+* Add arm tests and upgrade tests [(#5526)](https://github.com/k3s-io/k3s/pull/5526)
+* Delay service readiness until after startuphooks have finished [(#5649)](https://github.com/k3s-io/k3s/pull/5649)
+* Disable urfave markdown/man docs generation [(#5566)](https://github.com/k3s-io/k3s/pull/5566)
+* The embedded etcd snapshot controller will no longer fail to process snapshot files containing characters that are invalid for use in ConfigMap keys. 
[(#5702)](https://github.com/k3s-io/k3s/pull/5702) +* Environment variables prefixed with `CONTAINERD_` now take priority over other existing variables, when passed through to containerd. [(#5706)](https://github.com/k3s-io/k3s/pull/5706) +* The embedded etcd instance no longer accepts connections from other nodes while resetting or restoring. [(#5542)](https://github.com/k3s-io/k3s/pull/5542) +* Enable compatibility tests for k3s s390x [(#5658)](https://github.com/k3s-io/k3s/pull/5658) +* Containerd: Enable enable_unprivileged_ports and enable_unprivileged_… [(#5538)](https://github.com/k3s-io/k3s/pull/5538) +* The embedded Helm controller now properly updates Chart deployments when HelmChartConfig resources are updated or deleted. [(#5731)](https://github.com/k3s-io/k3s/pull/5731) +* Update to v1.24.2 [(#5749)](https://github.com/k3s-io/k3s/pull/5749) + +----- +## Release [v1.24.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.24.1+k3s1) + +This release updates Kubernetes to v1.24.1, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1240). + +### Changes since v1.24.0+k3s1: + +* Objects will be removed from Kubernetes when they are removed from manifest files. [(#5560)](https://github.com/k3s-io/k3s/pull/5560) +* Remove errant unversioned etcd go.mod entry [(#5548)](https://github.com/k3s-io/k3s/pull/5548) +* Pass the node-ip values to kubelet [(#5579)](https://github.com/k3s-io/k3s/pull/5579) +* The integrated apiserver network proxy's operational mode can now be set with `--egress-selector-mode`. [(#5577)](https://github.com/k3s-io/k3s/pull/5577) +* remove dweomer from maintainers [(#5582)](https://github.com/k3s-io/k3s/pull/5582) +* Bump dynamiclistener to v0.3.3 [(#5554)](https://github.com/k3s-io/k3s/pull/5554) +* Update to v1.24.1-k3s1 [(#5616)](https://github.com/k3s-io/k3s/pull/5616) +* Re-add `--cloud-provider=external` kubelet arg [(#5628)](https://github.com/k3s-io/k3s/pull/5628) +* Revert "Give kubelet the node-ip value (#5579)" [(#5636)](https://github.com/k3s-io/k3s/pull/5636) + +----- diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.25.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.25.X.md new file mode 100644 index 000000000..de3ea4a8c --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.25.X.md @@ -0,0 +1,583 @@ +--- +hide_table_of_contents: true +sidebar_position: 8 +--- + +# v1.25.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#urgent-upgrade-notes). 
+::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.25.16+k3s4](v1.25.X.md#release-v12516k3s4) | Dec 07 2023| [v1.25.16](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12516) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.7-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.15+k3s2](v1.25.X.md#release-v12515k3s2) | Nov 08 2023| [v1.25.15](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12515) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.7-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.15+k3s1](v1.25.X.md#release-v12515k3s1) | Oct 30 2023| [v1.25.15](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12515) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.7-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.14+k3s1](v1.25.X.md#release-v12514k3s1) | Sep 20 2023| [v1.25.14](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12514) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | 
[v1.7.6-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.6-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.13+k3s1](v1.25.X.md#release-v12513k3s1) | Sep 05 2023| [v1.25.13](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12513) | [v0.10.2](https://github.com/k3s-io/kine/releases/tag/v0.10.2) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.3-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.3-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.12+k3s1](v1.25.X.md#release-v12512k3s1) | Jul 27 2023| [v1.25.12](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12512) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.22.0](https://github.com/flannel-io/flannel/releases/tag/v0.22.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.2](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.2) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.11+k3s1](v1.25.X.md#release-v12511k3s1) | Jun 26 2023| [v1.25.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12511) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.22.0](https://github.com/flannel-io/flannel/releases/tag/v0.22.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.10+k3s1](v1.25.X.md#release-v12510k3s1) | May 26 2023| 
[v1.25.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v12510) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.14.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.14.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.9+k3s1](v1.25.X.md#release-v1259k3s1) | Apr 20 2023| [v1.25.9](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1259) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.19-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.19-k3s1) | [v1.1.5](https://github.com/opencontainers/runc/releases/tag/v1.1.5) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.13.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.3) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.25.8+k3s1](v1.25.X.md#release-v1258k3s1) | Mar 27 2023| [v1.25.8](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1258) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.19-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.19-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.25.7+k3s1](v1.25.X.md#release-v1257k3s1) | Mar 10 2023| [v1.25.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1257) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.15-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.21.1](https://github.com/flannel-io/flannel/releases/tag/v0.21.1) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | 
[v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.25.6+k3s1](v1.25.X.md#release-v1256k3s1) | Jan 26 2023| [v1.25.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1256) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.15-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2](https://github.com/flannel-io/flannel/releases/tag/v0.20.2) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.25.5+k3s2](v1.25.X.md#release-v1255k3s2) | Jan 11 2023| [v1.25.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1255) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.14-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.14-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2](https://github.com/flannel-io/flannel/releases/tag/v0.20.2) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.25.5+k3s1](v1.25.X.md#release-v1255k3s1) | Dec 20 2022| [v1.25.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1255) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.12-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.12-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2](https://github.com/flannel-io/flannel/releases/tag/v0.20.2) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.25.4+k3s1](v1.25.X.md#release-v1254k3s1) | Nov 18 2022| [v1.25.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1254) | [v0.9.6](https://github.com/k3s-io/kine/releases/tag/v0.9.6) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.8-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.8-k3s1) | 
[v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.1](https://github.com/flannel-io/flannel/releases/tag/v0.20.1) | [v0.6.1](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.1) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.0) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.25.3+k3s1](v1.25.X.md#release-v1253k3s1) | Oct 25 2022| [v1.25.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1253) | [v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.8-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.8-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.19.2](https://github.com/flannel-io/flannel/releases/tag/v0.19.2) | [v0.6.1](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.1) | [v2.9.1](https://github.com/traefik/traefik/releases/tag/v2.9.1) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.25.2+k3s1](v1.25.X.md#release-v1252k3s1) | Sep 28 2022| [v1.25.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1252) | [v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.6.8-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.8-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.19.2](https://github.com/flannel-io/flannel/releases/tag/v0.19.2) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | +| [v1.25.0+k3s1](v1.25.X.md#release-v1250k3s1) | Sep 12 2022| [v1.25.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#v1250) | [v0.9.3](https://github.com/k3s-io/kine/releases/tag/v0.9.3) | [3.36.0](https://sqlite.org/releaselog/3_36_0.html) | [v3.5.3-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.3-k3s1) | [v1.5.13-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.5.13-k3s2) | [v1.1.3](https://github.com/opencontainers/runc/releases/tag/v1.1.3) | [v0.19.1](https://github.com/flannel-io/flannel/releases/tag/v0.19.1) | [v0.5.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2) | [v2.6.2](https://github.com/traefik/traefik/releases/tag/v2.6.2) | [v1.9.1](https://github.com/coredns/coredns/releases/tag/v1.9.1) | [v0.12.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.12.3) | [v0.0.21](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.21) | + +
+
+## Release [v1.25.16+k3s4](https://github.com/k3s-io/k3s/releases/tag/v1.25.16+k3s4)
+
+
+This release updates Kubernetes to v1.25.16, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v12515).
+
+### Changes since v1.25.15+k3s2:
+
+* Etcd status condition [(#8819)](https://github.com/k3s-io/k3s/pull/8819)
+* Backports for 2023-11 release [(#8880)](https://github.com/k3s-io/k3s/pull/8880)
+  * New timezone info in Docker image allows the use of `spec.timeZone` in CronJobs
+  * Bumped kine to v0.11.0 to resolve issues with postgres and NATS, fix performance of watch channels under heavy load, and improve compatibility with the reference implementation.
+  * Containerd may now be configured to use rdt or blockio configuration by defining `rdt_config.yaml` or `blockio_config.yaml` files.
+  * Added the agent flag `--disable-apiserver-lb`; when set, the agent will not start its client load balancer proxy.
+  * Improved ingress IP ordering from ServiceLB
+  * Disable helm CRD installation for disable-helm-controller
+  * Omit snapshot list configmap entries for snapshots without extra metadata
+  * Add jitter to client config retry to avoid hammering servers when they are starting up
+* Handle nil pointer when runtime core is not ready in etcd [(#8889)](https://github.com/k3s-io/k3s/pull/8889)
+* Improve dualStack log [(#8867)](https://github.com/k3s-io/k3s/pull/8867)
+* Bump dynamiclistener; reduce snapshot controller log spew [(#8904)](https://github.com/k3s-io/k3s/pull/8904)
+  * Bumped dynamiclistener to address a race condition that could cause a server to fail to sync its certificates into the Kubernetes secret
+  * Reduced etcd snapshot log spam during initial cluster startup
+* Fix etcd snapshot S3 issues [(#8939)](https://github.com/k3s-io/k3s/pull/8939)
+  * Don't apply S3 retention if S3 client failed to initialize
+  * Don't request metadata when listing S3 snapshots
+  * Print key instead of file path in snapshot metadata log message
+* Update to v1.25.16 [(#8923)](https://github.com/k3s-io/k3s/pull/8923)
+* Remove s390x steps temporarily since runners are disabled [(#8993)](https://github.com/k3s-io/k3s/pull/8993)
+* Remove s390x from manifest script [(#8994)](https://github.com/k3s-io/k3s/pull/8994)
+
+-----
+## Release [v1.25.15+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.25.15+k3s2)
+
+
+This release updates Kubernetes to v1.25.15, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v12515).
+
+### Changes since v1.25.15+k3s1:
+
+* E2E Domain Drone Cleanup [(#8584)](https://github.com/k3s-io/k3s/pull/8584)
+* Fix SystemdCgroup in templates_linux.go [(#8767)](https://github.com/k3s-io/k3s/pull/8767)
+  * Fixed an issue with identifying additional container runtimes
+* Update traefik chart to v25.0.0 [(#8777)](https://github.com/k3s-io/k3s/pull/8777)
+* Update traefik to fix registry value [(#8791)](https://github.com/k3s-io/k3s/pull/8791)
+
+-----
+## Release [v1.25.15+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.15+k3s1)
+
+
+This release updates Kubernetes to v1.25.15, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v12514).
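+
+The CronJob time-zone support called out under v1.25.16+k3s4 above can be exercised roughly as follows, assuming the beta CronJobTimeZone feature gate is enabled (it is on by default in this Kubernetes line); the name, schedule, and image are illustrative:
+
+```bash
+# With timezone data present in the K3s image, spec.timeZone resolves
+# IANA zone names instead of failing validation.
+kubectl apply -f - <<'EOF'
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: nightly-report
+spec:
+  schedule: "0 2 * * *"
+  timeZone: "America/Sao_Paulo"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          restartPolicy: OnFailure
+          containers:
+          - name: report
+            image: busybox:1.36
+            command: ["sh", "-c", "date"]
+EOF
+```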
+
+### Changes since v1.25.14+k3s1:
+
+* Fix error reporting [(#8413)](https://github.com/k3s-io/k3s/pull/8413)
+* Add context to flannel errors [(#8421)](https://github.com/k3s-io/k3s/pull/8421)
+* Testing Backports for September [(#8301)](https://github.com/k3s-io/k3s/pull/8301)
+* Include the interface name in the error message [(#8437)](https://github.com/k3s-io/k3s/pull/8437)
+* Add extraArgs to tailscale [(#8466)](https://github.com/k3s-io/k3s/pull/8466)
+* Update kube-router [(#8445)](https://github.com/k3s-io/k3s/pull/8445)
+* Added error when cluster reset while using server flag [(#8457)](https://github.com/k3s-io/k3s/pull/8457)
+  * The user now receives an error when `--cluster-reset` is used together with the `--server` flag
+* Cluster reset from non bootstrap nodes [(#8454)](https://github.com/k3s-io/k3s/pull/8454)
+* Fix spellcheck problem [(#8511)](https://github.com/k3s-io/k3s/pull/8511)
+* Take IPFamily precedence based on order [(#8506)](https://github.com/k3s-io/k3s/pull/8506)
+* Network defaults are duplicated, remove one [(#8553)](https://github.com/k3s-io/k3s/pull/8553)
+* Advertise address integration test [(#8518)](https://github.com/k3s-io/k3s/pull/8518)
+* Fixed tailscale node IP dualstack mode in case of IPv4 only node [(#8560)](https://github.com/k3s-io/k3s/pull/8560)
+* Server Token Rotation [(#8578)](https://github.com/k3s-io/k3s/pull/8578)
+  * Users can now rotate the server token using `k3s token rotate -t <old-token> --new-token <new-token>`. After the command succeeds, all server nodes must be restarted with the new token.
+* Clear remove annotations on cluster reset [(#8589)](https://github.com/k3s-io/k3s/pull/8589)
+  * Fixed an issue that could cause k3s to attempt to remove members from the etcd cluster immediately following a cluster-reset/restore, if they were queued for removal at the time the snapshot was taken.
+* Use IPv6 in case is the first configured IP with dualstack [(#8599)](https://github.com/k3s-io/k3s/pull/8599)
+* Backports for 2023-10 release [(#8617)](https://github.com/k3s-io/k3s/pull/8617)
+* Update kube-router package in build script [(#8636)](https://github.com/k3s-io/k3s/pull/8636)
+* Add etcd-only/control-plane-only server test and fix control-plane-only server crash [(#8644)](https://github.com/k3s-io/k3s/pull/8644)
+* Windows agent support [(#8646)](https://github.com/k3s-io/k3s/pull/8646)
+* Use `version.Program` not K3s in token rotate logs [(#8654)](https://github.com/k3s-io/k3s/pull/8654)
+* Add --image-service-endpoint flag (#8279) [(#8664)](https://github.com/k3s-io/k3s/pull/8664)
+  * Add `--image-service-endpoint` flag to specify an external image service socket.
+* Backport etcd fixes [(#8692)](https://github.com/k3s-io/k3s/pull/8692)
+  * Re-enable etcd endpoint auto-sync
+  * Manually requeue configmap reconcile when no nodes have reconciled snapshots
+* Update to v1.25.15 and Go to v1.20.10 [(#8679)](https://github.com/k3s-io/k3s/pull/8679)
+* Fix s3 snapshot restore [(#8735)](https://github.com/k3s-io/k3s/pull/8735)
+
+-----
+## Release [v1.25.14+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.14+k3s1)
+
+
+This release updates Kubernetes to v1.25.14, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v12513).
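+
+The server token rotation added in v1.25.15+k3s1 above can be driven as in this sketch; the tokens are placeholders, and the exact flow should be confirmed against the documentation for your release:
+
+```bash
+# Rotate the server token, supplying the current token and its
+# replacement (angle-bracket values are placeholders).
+sudo k3s token rotate -t <current-token> --new-token <new-token>
+
+# Once the command reports success, restart every server node so it
+# rejoins using the rotated token.
+sudo systemctl restart k3s
+```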
+
+### Changes since v1.25.13+k3s1:
+
+* Bump kine to v0.10.3 [(#8326)](https://github.com/k3s-io/k3s/pull/8326)
+* Update Kubernetes to v1.25.14 and go to 1.20.8 [(#8350)](https://github.com/k3s-io/k3s/pull/8350)
+* Backport containerd bump and test fixes [(#8384)](https://github.com/k3s-io/k3s/pull/8384)
+  * Bump embedded containerd to v1.7.6
+  * Bump embedded stargz-snapshotter plugin to latest
+  * Fixed intermittent drone CI failures due to race conditions in test environment setup scripts
+  * Fixed CI failures due to API discovery changes in Kubernetes 1.28
+
+-----
+## Release [v1.25.13+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.13+k3s1)
+
+This release updates Kubernetes to v1.25.13, and fixes a number of issues.
+
+:::warning Important
+This release includes support for remediating CVE-2023-32187, a potential Denial of Service attack vector on K3s servers. See https://github.com/k3s-io/k3s/security/advisories/GHSA-m4hf-6vgr-75r2 for more information, including mandatory steps necessary to harden clusters against this vulnerability.
+:::
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v12512).
+
+### Changes since v1.25.12+k3s1:
+
+* Update flannel and plugins [(#8076)](https://github.com/k3s-io/k3s/pull/8076)
+* Fix tailscale bug with ip modes [(#8098)](https://github.com/k3s-io/k3s/pull/8098)
+* Etcd snapshots retention when node name changes [(#8123)](https://github.com/k3s-io/k3s/pull/8123)
+* August Test Backports [(#8127)](https://github.com/k3s-io/k3s/pull/8127)
+* Backports for 2023-08 release [(#8132)](https://github.com/k3s-io/k3s/pull/8132)
+  * K3s's external apiserver listener now declines to add to its certificate any subject names not associated with the kubernetes apiserver service, server nodes, or values of the --tls-san option. This prevents the certificate's SAN list from being filled with unwanted entries.
+  * K3s no longer enables the apiserver's `enable-aggregator-routing` flag when the egress proxy is not being used to route connections to in-cluster endpoints.
+  * Updated the embedded containerd to v1.7.3+k3s1
+  * Updated the embedded runc to v1.1.8
+  * User-provided containerd config templates may now use `{{ template "base" . }}` to include the default K3s template content. This makes it easier to maintain user configuration if the only need is to add additional sections to the file.
+  * Bump docker/docker module version to fix issues with cri-dockerd caused by recent releases of golang rejecting invalid host headers sent by the docker client.
+  * Updated kine to v0.10.2
+* K3s etcd-snapshot delete fail to delete local file when called with s3 flag [(#8145)](https://github.com/k3s-io/k3s/pull/8145)
+* Fix for cluster-reset backup from s3 when etcd snapshots are disabled [(#8169)](https://github.com/k3s-io/k3s/pull/8169)
+* Fixed the etcd retention to delete orphaned snapshots based on the date [(#8190)](https://github.com/k3s-io/k3s/pull/8190)
+* Additional backports for 2023-08 release [(#8213)](https://github.com/k3s-io/k3s/pull/8213)
+  * The version of `helm` used by the bundled helm controller's job image has been updated to v3.12.3
+  * Bumped dynamiclistener to address an issue that could cause the apiserver/supervisor listener on 6443 to stop serving requests on etcd-only nodes.
+  * The K3s external apiserver/supervisor listener on 6443 now sends a complete certificate chain in the TLS handshake.
+* Move flannel to 0.22.2 [(#8223)](https://github.com/k3s-io/k3s/pull/8223)
+* Update to v1.25.13 [(#8241)](https://github.com/k3s-io/k3s/pull/8241)
+* Fix runc version bump [(#8246)](https://github.com/k3s-io/k3s/pull/8246)
+* Add new CLI flag to enable TLS SAN CN filtering [(#8259)](https://github.com/k3s-io/k3s/pull/8259)
+  * Added a new `--tls-san-security` option. This flag defaults to false, but can be set to true to disable automatically adding SANs to the server's TLS certificate to satisfy any hostname requested by a client.
+* Add RWMutex to address controller [(#8275)](https://github.com/k3s-io/k3s/pull/8275)
+
+-----
+## Release [v1.25.12+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.12+k3s1)
+
+This release updates Kubernetes to v1.25.12, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v12511).
+
+### Changes since v1.25.11+k3s1:
+
+* Remove file_windows.go [(#7856)](https://github.com/k3s-io/k3s/pull/7856)
+* Fix code spell check [(#7860)](https://github.com/k3s-io/k3s/pull/7860)
+* Allow k3s to customize apiServerPort on helm-controller [(#7873)](https://github.com/k3s-io/k3s/pull/7873)
+* Check if we are on ipv4, ipv6 or dualStack when doing tailscale [(#7883)](https://github.com/k3s-io/k3s/pull/7883)
+* Support setting control server URL for Tailscale. [(#7894)](https://github.com/k3s-io/k3s/pull/7894)
+* S3 and Startup tests [(#7886)](https://github.com/k3s-io/k3s/pull/7886)
+* Fix rootless node password [(#7900)](https://github.com/k3s-io/k3s/pull/7900)
+* Backports for 2023-07 release [(#7909)](https://github.com/k3s-io/k3s/pull/7909)
+  * Resolved an issue that caused agents joined with kubeadm-style bootstrap tokens to fail to rejoin the cluster when their node object is deleted.
+  * The `k3s certificate rotate-ca` command now supports the data-dir flag.
+* Adding cli to custom klipper helm image [(#7915)](https://github.com/k3s-io/k3s/pull/7915)
+  * The default helm-controller job image can now be overridden with the --helm-job-image CLI flag
+* Generation of certs and keys for etcd gated if etcd is disabled [(#7945)](https://github.com/k3s-io/k3s/pull/7945)
+* Don't use zgrep in `check-config` if apparmor profile is enforced [(#7954)](https://github.com/k3s-io/k3s/pull/7954)
+* Fix image_scan.sh script and download trivy version (#7950) [(#7969)](https://github.com/k3s-io/k3s/pull/7969)
+* Adjust default kubeconfig file permissions [(#7984)](https://github.com/k3s-io/k3s/pull/7984)
+* Update to v1.25.12 [(#8021)](https://github.com/k3s-io/k3s/pull/8021)
+
+-----
+## Release [v1.25.11+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.11+k3s1)
+
+This release updates Kubernetes to v1.25.11, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v12510).
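+
+The SAN handling described under v1.25.13+k3s1 above combines roughly like this; a sketch, not a complete server configuration (the hostname is illustrative):
+
+```bash
+# Explicitly add one extra hostname to the server certificate, and opt
+# in to strict filtering so SANs are not added automatically for any
+# hostname a client happens to request.
+k3s server \
+  --tls-san k3s.example.com \
+  --tls-san-security=true
+```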
+ +### Changes since v1.25.10+k3s1: + +* Update flannel version [(#7649)](https://github.com/k3s-io/k3s/pull/7649) +* Bump vagrant libvirt with fix for plugin installs [(#7659)](https://github.com/k3s-io/k3s/pull/7659) +* E2E Backports - June [(#7705)](https://github.com/k3s-io/k3s/pull/7705) + * Shortcircuit commands with version or help flags #7683 + * Add Rotation certification Check, remove func to restart agents #7097 + * E2E: Sudo for RunCmdOnNode #7686 +* Add private registry e2e test [(#7722)](https://github.com/k3s-io/k3s/pull/7722) +* VPN integration [(#7728)](https://github.com/k3s-io/k3s/pull/7728) +* Fix spelling test [(#7752)](https://github.com/k3s-io/k3s/pull/7752) +* Remove unused libvirt config [(#7758)](https://github.com/k3s-io/k3s/pull/7758) +* Backport version bumps and bugfixes [(#7718)](https://github.com/k3s-io/k3s/pull/7718) + * The bundled metrics-server has been bumped to v0.6.3, and now uses only secure TLS ciphers by default. + * The `coredns-custom` ConfigMap now allows for `*.override` sections to be included in the `.:53` default server block. + * The K3s core controllers (supervisor, deploy, and helm) no longer use the admin kubeconfig. This makes it easier to determine from access and audit logs which actions are performed by the system, and which are performed by an administrative user. + * Bumped klipper-lb image to v0.4.4 to resolve an issue that prevented access to ServiceLB ports from localhost when the Service ExternalTrafficPolicy was set to Local. + * Make LB image configurable when compiling k3s + * K3s now allows nodes to join the cluster even if the node password secret cannot be created at the time the node joins. The secret create will be retried in the background. This resolves a potential deadlock created by fail-closed validating webhooks that block secret creation, where the webhook is unavailable until new nodes join the cluster to run the webhook pod. + * The bundled containerd's aufs/devmapper/zfs snapshotter plugins have been restored. These were unintentionally omitted when moving containerd back into the k3s multicall binary in the previous release. + * The embedded helm controller has been bumped to v0.15.0, and now supports creating the chart's target namespace if it does not exist. +* Add format command on Makefile [(#7763)](https://github.com/k3s-io/k3s/pull/7763) +* Fix logging and cleanup in Tailscale [(#7784)](https://github.com/k3s-io/k3s/pull/7784) +* Update Kubernetes to v1.25.11 [(#7788)](https://github.com/k3s-io/k3s/pull/7788) +* Path normalization affecting kubectl proxy conformance test for /api endpoint [(#7818)](https://github.com/k3s-io/k3s/pull/7818) + +----- +## Release [v1.25.10+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.10+k3s1) + +This release updates Kubernetes to v1.25.10, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1259). 
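+
+Two of the configuration items called out in the list below (`--tls-sans` accepting multiple values, and `prefer-bundled-bin` being honored in `config.yaml.d` files) can be expressed in a configuration drop-in. The following is a minimal, hypothetical sketch; the drop-in file name and SAN entries are placeholders, not part of this release:
+
+```bash
+# Hypothetical sketch: write a k3s config drop-in using the two options
+# noted in the list below. k3s merges /etc/rancher/k3s/config.yaml with
+# any *.yaml files under /etc/rancher/k3s/config.yaml.d/.
+mkdir -p /etc/rancher/k3s/config.yaml.d
+cat > /etc/rancher/k3s/config.yaml.d/10-example.yaml <<'EOF'
+# Extra subject names for the server's TLS certificate (placeholder values)
+tls-san:
+  - k3s.example.com
+  - 10.0.0.10
+# Prefer k3s's bundled userspace binaries over the host's binaries
+prefer-bundled-bin: true
+EOF
+```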
+
+### Changes since v1.25.9+k3s1:
+
+* Ensure that klog verbosity is set to the same level as logrus [(#7361)](https://github.com/k3s-io/k3s/pull/7361)
+* Add E2E testing in Drone [(#7375)](https://github.com/k3s-io/k3s/pull/7375)
+* Add integration tests for etcd-snapshot server flags #7377 [(#7378)](https://github.com/k3s-io/k3s/pull/7378)
+* CLI + Config Enhancement [(#7404)](https://github.com/k3s-io/k3s/pull/7404)
+    * `--tls-sans` now accepts multiple arguments: `--tls-sans="foo,bar"`
+    * `prefer-bundled-bin: true` now works properly when set in `config.yaml.d` files
+* Migrate netutil methods into /utils/net.go [(#7433)](https://github.com/k3s-io/k3s/pull/7433)
+* Bump Runc + Containerd + Docker for CVE fixes [(#7452)](https://github.com/k3s-io/k3s/pull/7452)
+* Bump kube-router version to fix a bug when a port name is used [(#7461)](https://github.com/k3s-io/k3s/pull/7461)
+* Kube flags and longhorn storage tests 1.25 [(#7466)](https://github.com/k3s-io/k3s/pull/7466)
+* Local-storage: Fix permission [(#7473)](https://github.com/k3s-io/k3s/pull/7473)
+* Backport version bumps and bugfixes [(#7515)](https://github.com/k3s-io/k3s/pull/7515)
+    * K3s now retries the cluster join operation when receiving a "too many learners" error from etcd. This most frequently occurred when attempting to add multiple servers at the same time.
+    * K3s once again supports aarch64 nodes with page size > 4k
+    * The packaged Traefik version has been bumped to v2.9.10 / chart 21.2.0
+    * K3s now prints a more meaningful error when attempting to run from a filesystem mounted `noexec`.
+    * K3s now exits with a proper error message when the server token uses a bootstrap token `id.secret` format.
+    * Fixed an issue where Addon, HelmChart, and HelmChartConfig CRDs were created without structural schema, allowing the creation of custom resources of these types with invalid content.
+    * Servers started with the (experimental) --disable-agent flag no longer attempt to run the tunnel authorizer agent component.
+    * Fixed a regression that prevented the pod and cluster egress-selector modes from working properly.
+    * K3s now correctly passes through etcd-args to the temporary etcd that is used to extract cluster bootstrap data when restarting managed etcd nodes.
+    * K3s now properly handles errors obtaining the current etcd cluster member list when a new server is joining the managed etcd cluster.
+    * The embedded kine version has been bumped to v0.10.1. This replaces the legacy `lib/pq` postgres driver with `pgx`.
+    * The bundled CNI plugins have been upgraded to v1.2.0-k3s1. The bandwidth and firewall plugins are now included in the bundle.
+    * The embedded Helm controller now supports authenticating to chart repositories via credentials stored in a Secret, as well as passing repo CAs via ConfigMap.
+* Bump containerd/runc to v1.7.1-k3s1/v1.1.7 [(#7535)](https://github.com/k3s-io/k3s/pull/7535)
+    * The bundled containerd and runc versions have been bumped to v1.7.1-k3s1/v1.1.7
+* Wrap error stating that it is coming from netpol [(#7548)](https://github.com/k3s-io/k3s/pull/7548)
+* Add '-all' flag to apply to inactive units [(#7574)](https://github.com/k3s-io/k3s/pull/7574)
+* Update to v1.25.10-k3s1 [(#7582)](https://github.com/k3s-io/k3s/pull/7582)
+
+-----
+## Release [v1.25.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.9+k3s1)
+
+This release updates Kubernetes to v1.25.9, and fixes a number of issues.
+ +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1258). + +### Changes since v1.25.8+k3s1: + +* Enhance `check-config` [(#7164)](https://github.com/k3s-io/k3s/pull/7164) +* Remove deprecated nodeSelector label beta.kubernetes.io/os (#6970) [(#7121)](https://github.com/k3s-io/k3s/pull/7121) +* Backport version bumps and bugfixes [(#7228)](https://github.com/k3s-io/k3s/pull/7228) + * The bundled local-path-provisioner version has been bumped to v0.0.24 + * The bundled runc version has been bumped to v1.1.5 + * The bundled coredns version has been bumped to v1.10.1 + * When using an external datastore, K3s now locks the bootstrap key while creating initial cluster bootstrap data, preventing a race condition when multiple servers attempted to initialize the cluster simultaneously. + * The client load-balancer that maintains connections to active server nodes now closes connections to servers when they are removed from the cluster. This ensures that agent components immediately reconnect to a current cluster member. + * Fixed a race condition during cluster reset that could cause the operation to hang and time out. +* Updated kube-router to move the default ACCEPT rule at the end of the chain [(#7221)](https://github.com/k3s-io/k3s/pull/7221) + * The embedded kube-router controller has been updated to fix a regression that caused traffic from pods to be blocked by any default drop/deny rules present on the host. Users should still confirm that any externally-managed firewall rules explicitly allow traffic to/from pod and service networks, but this returns the old behavior that was relied upon by some users. +* Update klipper lb and helm-controller [(#7240)](https://github.com/k3s-io/k3s/pull/7240) +* Update Kube-router ACCEPT rule insertion and install script to clean rules before start [(#7276)](https://github.com/k3s-io/k3s/pull/7276) + * The embedded kube-router controller has been updated to fix a regression that caused traffic from pods to be blocked by any default drop/deny rules present on the host. Users should still confirm that any externally-managed firewall rules explicitly allow traffic to/from pod and service networks, but this returns the old behavior that was relied upon by some users. +* Update to v1.25.9-k3s1 [(#7283)](https://github.com/k3s-io/k3s/pull/7283) + +----- +## Release [v1.25.8+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.8+k3s1) + +This release updates Kubernetes to v1.25.8, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1257). + +### Changes since v1.25.7+k3s1: + +* Update flannel and kube-router [(#7061)](https://github.com/k3s-io/k3s/pull/7061) +* Bump various dependencies for CVEs [(#7043)](https://github.com/k3s-io/k3s/pull/7043) +* Enable dependabot [(#7045)](https://github.com/k3s-io/k3s/pull/7045) +* Wait for kubelet port to be ready before setting [(#7064)](https://github.com/k3s-io/k3s/pull/7064) + * The agent tunnel authorizer now waits for the kubelet to be ready before reading the kubelet port from the node object. 
+* Adds a warning about editing to the containerd config.toml file [(#7075)](https://github.com/k3s-io/k3s/pull/7075) +* Improve support for rotating the default self-signed certs [(#7079)](https://github.com/k3s-io/k3s/pull/7079) + * The `k3s certificate rotate-ca` checks now support rotating self-signed certificates without the `--force` option. +* Update to v1.25.8-k3s1 [(#7106)](https://github.com/k3s-io/k3s/pull/7106) +* Update flannel to fix NAT issue with old iptables version [(#7138)](https://github.com/k3s-io/k3s/pull/7138) + +----- +## Release [v1.25.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.7+k3s1) + +This release updates Kubernetes to v1.25.7, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1256). + +### Changes since v1.25.6+k3s1: + +* Add jitter to scheduled snapshots and retry harder on conflicts [(#6782)](https://github.com/k3s-io/k3s/pull/6782) + * Scheduled etcd snapshots are now offset by a short random delay of up to several seconds. This should prevent multi-server clusters from executing pathological behavior when attempting to simultaneously update the snapshot list ConfigMap. The snapshot controller will also be more persistent in attempting to update the snapshot list. +* Bump cri-dockerd [(#6798)](https://github.com/k3s-io/k3s/pull/6798) + * The embedded cri-dockerd has been updated to v0.3.1 +* Bugfix: do not break cert-manager when pprof is enabled [(#6837)](https://github.com/k3s-io/k3s/pull/6837) +* Wait for cri-dockerd socket [(#6853)](https://github.com/k3s-io/k3s/pull/6853) +* Bump vagrant boxes to fedora37 [(#6858)](https://github.com/k3s-io/k3s/pull/6858) +* Fix cronjob example [(#6864)](https://github.com/k3s-io/k3s/pull/6864) +* Ensure flag type consistency [(#6867)](https://github.com/k3s-io/k3s/pull/6867) +* Consolidate E2E tests [(#6887)](https://github.com/k3s-io/k3s/pull/6887) +* Ignore value conflicts when reencrypting secrets [(#6919)](https://github.com/k3s-io/k3s/pull/6919) +* Use default address family when adding kubernetes service address to SAN list [(#6904)](https://github.com/k3s-io/k3s/pull/6904) + * The apiserver advertised address and IP SAN entry are now set correctly on clusters that use IPv6 as the default IP family. +* Allow ServiceLB to honor `ExternalTrafficPolicy=Local` [(#6907)](https://github.com/k3s-io/k3s/pull/6907) + * ServiceLB now honors the Service's ExternalTrafficPolicy. When set to Local, the LoadBalancer will only advertise addresses of Nodes with a Pod for the Service, and will not forward traffic to other cluster members. +* Fix issue with servicelb startup failure when validating webhooks block creation [(#6916)](https://github.com/k3s-io/k3s/pull/6916) + * The embedded cloud controller manager will no longer attempt to unconditionally re-create its namespace and serviceaccount on startup. This resolves an issue that could cause a deadlocked cluster when fail-closed webhooks are in use. +* Backport user-provided CA cert and `kubeadm` bootstrap token support [(#6929)](https://github.com/k3s-io/k3s/pull/6929) + * K3s now functions properly when the cluster CA certificates are signed by an existing root or intermediate CA. You can find a sample script for generating such certificates before K3s starts in the github repo at [contrib/util/certs.sh](https://github.com/k3s-io/k3s/blob/master/contrib/util/certs.sh). 
+    * K3s now supports `kubeadm` style join tokens. `k3s token create` now creates join token secrets, optionally with a limited TTL (a usage sketch appears below, after the v1.25.5+k3s2 notes).
+    * K3s agents joined with an expired or deleted token stay in the cluster using existing client certificates via the NodeAuthorization admission plugin, unless their Node object is deleted from the cluster.
+* Fix access to hostNetwork port on NodeIP when egress-selector-mode=agent [(#6936)](https://github.com/k3s-io/k3s/pull/6936)
+    * Fixed an issue that would cause the apiserver egress proxy to attempt to use the agent tunnel to connect to service endpoints even in agent or disabled mode.
+* Updated flannel version to v0.21.1 [(#6915)](https://github.com/k3s-io/k3s/pull/6915)
+* Allow for multiple sets of leader-elected controllers [(#6941)](https://github.com/k3s-io/k3s/pull/6941)
+    * Fixed an issue where leader-elected controllers for managed etcd did not run on etcd-only nodes
+* Fix etcd and ca-cert rotate issues [(#6954)](https://github.com/k3s-io/k3s/pull/6954)
+* Fix ServiceLB dual-stack ingress IP listing [(#6987)](https://github.com/k3s-io/k3s/pull/6987)
+    * Resolved an issue with ServiceLB that would cause it to advertise node IPv6 addresses, even if the cluster or service was not enabled for dual-stack operation.
+* Bump kine to v0.9.9 [(#6975)](https://github.com/k3s-io/k3s/pull/6975)
+    * The embedded kine version has been bumped to v0.9.9. Compaction log messages are now emitted at `info` level for increased visibility.
+* Update to v1.25.7-k3s1 [(#7010)](https://github.com/k3s-io/k3s/pull/7010)
+
+-----
+## Release [v1.25.6+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.6+k3s1)
+
+This release updates Kubernetes to v1.25.6, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1255).
+
+### Changes since v1.25.5+k3s2:
+
+* Pass through default tls-cipher-suites [(#6730)](https://github.com/k3s-io/k3s/pull/6730)
+    * The K3s default cipher suites are now explicitly passed in to kube-apiserver, ensuring that all listeners use these values.
+* Bump containerd to v1.6.15-k3s1 [(#6735)](https://github.com/k3s-io/k3s/pull/6735)
+    * The embedded containerd version has been bumped to v1.6.15-k3s1
+* Bump action/download-artifact to v3 [(#6747)](https://github.com/k3s-io/k3s/pull/6747)
+* Backport dependabot/updatecli updates [(#6761)](https://github.com/k3s-io/k3s/pull/6761)
+* Fix Drone plugins/docker tag for 32 bit arm [(#6768)](https://github.com/k3s-io/k3s/pull/6768)
+* Update to v1.25.6+k3s1 [(#6775)](https://github.com/k3s-io/k3s/pull/6775)
+
+-----
+## Release [v1.25.5+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.25.5+k3s2)
+
+
+This release updates containerd to v1.6.14 to resolve an issue where pods would lose their CNI information when containerd was restarted.
+
+### Changes since v1.25.5+k3s1:
+
+* Bump containerd to v1.6.14-k3s1 [(#6694)](https://github.com/k3s-io/k3s/pull/6694)
+    * The embedded containerd version has been bumped to v1.6.14-k3s1. This includes a backported fix for [containerd/7843](https://github.com/containerd/containerd/issues/7843) which caused pods to lose their CNI info when containerd was restarted, which in turn caused the kubelet to recreate the pod.
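+
+The `kubeadm`-style join token support added in v1.25.7+k3s1 above can be exercised roughly as follows. This is a hedged sketch, not an excerpt from the release: the hostname and token value are placeholders, and `k3s token --help` is the authoritative reference for the flags.
+
+```bash
+# On an existing server: create a join token that expires after 24 hours.
+# This prints a bootstrap token; treat the value used below as a placeholder.
+k3s token create --ttl 24h
+
+# On the node being added: join using the freshly created token.
+k3s agent --server https://server.example.com:6443 \
+  --token abcdef.0123456789abcdef
+```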
+
+-----
+## Release [v1.25.5+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.5+k3s1)
+
+
+> ## ⚠️ WARNING
+> This release is affected by https://github.com/containerd/containerd/issues/7843, which causes the kubelet to restart all pods whenever K3s is restarted. For this reason, we have removed this K3s release from the channel server. Please use `v1.25.5+k3s2` instead.
+
+This release updates Kubernetes to v1.25.5, and fixes a number of issues.
+
+**Breaking Change:** K3s no longer includes `swanctl` and `charon` binaries. If you are using the ipsec flannel backend, please ensure that the strongswan `swanctl` and `charon` packages are installed on your node before upgrading K3s to this release.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1254).
+
+### Changes since v1.25.4+k3s1:
+
+* Fix log for flannelExternalIP use case [(#6531)](https://github.com/k3s-io/k3s/pull/6531)
+* Fix Carolines github id [(#6464)](https://github.com/k3s-io/k3s/pull/6464)
+* Github CI Updates [(#6522)](https://github.com/k3s-io/k3s/pull/6522)
+* Add new `prefer-bundled-bin` experimental flag [(#6420)](https://github.com/k3s-io/k3s/pull/6420)
+    * Added the new `prefer-bundled-bin` flag, which forces K3s to use its bundled binaries rather than those of the host
+* Bump containerd to v1.6.10 [(#6512)](https://github.com/k3s-io/k3s/pull/6512)
+    * The embedded containerd version has been updated to v1.6.10-k3s1
+* Stage the Traefik charts through k3s-charts [(#6519)](https://github.com/k3s-io/k3s/pull/6519)
+* Make rootless settings configurable [(#6498)](https://github.com/k3s-io/k3s/pull/6498)
+    * The rootless `port-driver`, `cidr`, `mtu`, `enable-ipv6`, and `disable-host-loopback` settings can now be configured via environment variables.
+* Remove stuff which belongs in the windows executor implementation [(#6517)](https://github.com/k3s-io/k3s/pull/6517)
+* Mark v1.25.4+k3s1 as stable [(#6534)](https://github.com/k3s-io/k3s/pull/6534)
+* Add `prefer-bundled-bin` as an agent flag [(#6545)](https://github.com/k3s-io/k3s/pull/6545)
+* Bump klipper-helm and klipper-lb versions [(#6549)](https://github.com/k3s-io/k3s/pull/6549)
+    * The embedded Load-Balancer controller image has been bumped to klipper-lb:v0.4.0, which includes support for the [LoadBalancerSourceRanges](https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#:~:text=loadBalancerSourceRanges) field.
+    * The embedded Helm controller image has been bumped to klipper-helm:v0.7.4-build20221121
+* Switch from Google Buckets to AWS S3 Buckets [(#6497)](https://github.com/k3s-io/k3s/pull/6497)
+* Fix passing AWS creds through Dapper [(#6567)](https://github.com/k3s-io/k3s/pull/6567)
+* Fix artifact upload with `aws s3 cp` [(#6568)](https://github.com/k3s-io/k3s/pull/6568)
+* Disable CCM metrics port when legacy CCM functionality is disabled [(#6572)](https://github.com/k3s-io/k3s/pull/6572)
+    * The embedded cloud-controller-manager's metrics listener on port 10258 is now disabled when the `--disable-cloud-controller` flag is set.
+* Sync packaged component Deployment config [(#6552)](https://github.com/k3s-io/k3s/pull/6552)
+    * Deployments for K3s packaged components now have consistent upgrade strategy and revisionHistoryLimit settings, and will not override scaling decisions by hardcoding the replica count.
+ * The packaged metrics-server has been bumped to v0.6.2 +* Mark secrets-encryption flag as GA [(#6582)](https://github.com/k3s-io/k3s/pull/6582) +* Bump k3s root to v0.12.0 and remove strongswan binaries [(#6400)](https://github.com/k3s-io/k3s/pull/6400) + * The embedded k3s-root version has been bumped to v0.12.0, based on buildroot 2022.08.1. + * The embedded swanctl and charon binaries have been removed. If you are using the ipsec flannel backend, please ensure that the strongswan `swanctl` and `charon` packages are installed on your node before upgrading k3s. +* Update flannel to v0.20.2 [(#6588)](https://github.com/k3s-io/k3s/pull/6588) +* Add ADR for security bumps automation [(#6559)](https://github.com/k3s-io/k3s/pull/6559) +* Update node12->node16 based GH actions [(#6593)](https://github.com/k3s-io/k3s/pull/6593) +* Updating rel docs [(#6237)](https://github.com/k3s-io/k3s/pull/6237) +* Update install.sh to recommend current version of k3s-selinux [(#6453)](https://github.com/k3s-io/k3s/pull/6453) +* Update to v1.25.5-k3s1 [(#6622)](https://github.com/k3s-io/k3s/pull/6622) +* Bump containerd to v1.6.12-k3s1 [(#6631)](https://github.com/k3s-io/k3s/pull/6631) + * The embedded containerd version has been bumped to v1.6.12 +* Preload iptable_filter/ip6table_filter [(#6646)](https://github.com/k3s-io/k3s/pull/6646) + +----- +## Release [v1.25.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.4+k3s1) + +This release updates Kubernetes to v1.25.4, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1253). + +### Changes since v1.25.3+k3s1: + +* Add the gateway parameter in netplan [(#6292)](https://github.com/k3s-io/k3s/pull/6292) +* Bumped dynamiclistener library to v0.3.5 [(#6300)](https://github.com/k3s-io/k3s/pull/6300) +* Update kube-router to v1.5.1 with extra logging [(#6345)](https://github.com/k3s-io/k3s/pull/6345) +* Update maintainers [(#6298)](https://github.com/k3s-io/k3s/pull/6298) +* Bump testing to opensuse Leap 15.4 [(#6337)](https://github.com/k3s-io/k3s/pull/6337) +* Update E2E docs with more info on ubuntu 22.04 [(#6316)](https://github.com/k3s-io/k3s/pull/6316) +* Netpol test for podSelector & ingress [(#6247)](https://github.com/k3s-io/k3s/pull/6247) +* Bump all alpine images to 3.16 [(#6334)](https://github.com/k3s-io/k3s/pull/6334) +* Bump kine to v0.9.6 / sqlite3 v3.39.2 ([CVE-2022-35737](https://nvd.nist.gov/vuln/detail/CVE-2022-35737)) [(#6317)](https://github.com/k3s-io/k3s/pull/6317) +* Add hardened cluster and upgrade tests [(#6320)](https://github.com/k3s-io/k3s/pull/6320) +* The bundled Traefik helm chart has been updated to v18.0.0 [(#6353)](https://github.com/k3s-io/k3s/pull/6353) +* Mark v1.25.3+k3s1 as stable [(#6338)](https://github.com/k3s-io/k3s/pull/6338) +* The embedded helm controller has been bumped to v0.13.0 [(#6294)](https://github.com/k3s-io/k3s/pull/6294) +* Fixed an issue that would prevent the deploy controller from handling manifests that include resource types that are no longer supported by the apiserver. 
[(#6295)](https://github.com/k3s-io/k3s/pull/6295) +* Replace fedora-coreos with fedora 36 for install tests [(#6315)](https://github.com/k3s-io/k3s/pull/6315) +* Convert containerd config.toml.tmpl Linux template to v2 syntax [(#6267)](https://github.com/k3s-io/k3s/pull/6267) +* Add test for node-external-ip config parameter [(#6359)](https://github.com/k3s-io/k3s/pull/6359) +* Use debugger-friendly compile settings if DEBUG is set [(#6147)](https://github.com/k3s-io/k3s/pull/6147) +* update e2e tests [(#6354)](https://github.com/k3s-io/k3s/pull/6354) +* Remove unused vagrant development scripts [(#6395)](https://github.com/k3s-io/k3s/pull/6395) +* The bundled Traefik has been updated to v2.9.4 / helm chart v18.3.0 [(#6397)](https://github.com/k3s-io/k3s/pull/6397) +* None [(#6371)](https://github.com/k3s-io/k3s/pull/6371) +* Fix incorrect defer usage [(#6296)](https://github.com/k3s-io/k3s/pull/6296) +* Add snapshot restore e2e test [(#6396)](https://github.com/k3s-io/k3s/pull/6396) +* Fix sonobouy tests on v1.25 [(#6399)](https://github.com/k3s-io/k3s/pull/6399) +* Bump packaged component versions + * The packaged traefik helm chart has been bumped to v19.0.0, enabling ingressClass support by default. + * The packaged local-path-provisioner has been bumped to v0.0.23 + * The packaged coredns has been bumped to v1.9.4 [(#6408)](https://github.com/k3s-io/k3s/pull/6408) +* log kube-router version when starting netpol controller [(#6405)](https://github.com/k3s-io/k3s/pull/6405) +* Add Kairos to ADOPTERS [(#6417)](https://github.com/k3s-io/k3s/pull/6417) +* Update Flannel to 0.20.1 [(#6388)](https://github.com/k3s-io/k3s/pull/6388) +* Avoid wrong config for `flannel-external-ip` and add warning if unencrypted backend [(#6403)](https://github.com/k3s-io/k3s/pull/6403) +* Fix test-mods to allow for pinning version from k8s.io [(#6413)](https://github.com/k3s-io/k3s/pull/6413) +* Fix for metrics-server in the multi-cloud cluster env [(#6386)](https://github.com/k3s-io/k3s/pull/6386) +* K3s now indicates specifically which cluster-level configuration flags are out of sync when critical configuration differs between server nodes. [(#6409)](https://github.com/k3s-io/k3s/pull/6409) +* Convert test output to JSON format [(#6410)](https://github.com/k3s-io/k3s/pull/6410) +* Pull traefik helm chart directly from GH [(#6468)](https://github.com/k3s-io/k3s/pull/6468) +* Nightly test fix [(#6475)](https://github.com/k3s-io/k3s/pull/6475) +* Update to v1.25.4 [(#6477)](https://github.com/k3s-io/k3s/pull/6477) +* Remove stuff which belongs in the windows executor implementation [(#6492)](https://github.com/k3s-io/k3s/pull/6492) +* The packaged traefik helm chart has been bumped to 19.0.4 [(#6494)](https://github.com/k3s-io/k3s/pull/6494) +* Move traefik chart repo again [(#6508)](https://github.com/k3s-io/k3s/pull/6508) + +----- +## Release [v1.25.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.3+k3s1) + +This release updates Kubernetes to v1.25.3, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1252). 
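+
+One change called out in the list below is the new `--flannel-external-ip` flag. A minimal, hypothetical sketch of enabling it follows; the IP address is a documentation placeholder, and the encrypted `wireguard-native` backend is a suggestion for traffic crossing untrusted networks rather than a requirement of the flag:
+
+```bash
+# Server whose nodes reach each other over public addresses:
+# advertise an external IP and have flannel use external IPs for
+# inter-node traffic. Prefer an encrypted backend across the internet.
+k3s server \
+  --node-external-ip 203.0.113.10 \
+  --flannel-external-ip \
+  --flannel-backend wireguard-native
+```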
+
+### Changes since v1.25.2+k3s1:
+
+* E2E: Groundwork for PR runs [(#6131)](https://github.com/k3s-io/k3s/pull/6131)
+* Fix flannel for deployments of nodes which do not belong to the same network and connect using their public IP [(#6180)](https://github.com/k3s-io/k3s/pull/6180)
+* Mark v1.24.6+k3s1 as stable [(#6193)](https://github.com/k3s-io/k3s/pull/6193)
+* Add cluster reset test [(#6161)](https://github.com/k3s-io/k3s/pull/6161)
+* The embedded metrics-server version has been bumped to v0.6.1 [(#6151)](https://github.com/k3s-io/k3s/pull/6151)
+* The ServiceLB (klipper-lb) service controller is now integrated into the K3s stub cloud controller manager. [(#6181)](https://github.com/k3s-io/k3s/pull/6181)
+* Events recorded to the cluster by embedded controllers are now properly formatted in the service logs. [(#6203)](https://github.com/k3s-io/k3s/pull/6203)
+* Fix `error dialing backend` errors in apiserver network proxy [(#6216)](https://github.com/k3s-io/k3s/pull/6216)
+    * Fixed an issue with the apiserver network proxy that caused `kubectl exec` to occasionally fail with `error dialing backend: EOF`
+    * Fixed an issue with the apiserver network proxy that caused `kubectl exec` and `kubectl logs` to fail when a custom kubelet port was used, and the custom port was blocked by firewall or security group rules.
+* Fix the typo in the test [(#6183)](https://github.com/k3s-io/k3s/pull/6183)
+* Use setup-go action to cache dependencies [(#6220)](https://github.com/k3s-io/k3s/pull/6220)
+* Add journalctl logs to E2E tests [(#6224)](https://github.com/k3s-io/k3s/pull/6224)
+* The embedded Traefik version has been bumped to v2.9.1 / chart 12.0.0 [(#6223)](https://github.com/k3s-io/k3s/pull/6223)
+* Fix flakey etcd test [(#6232)](https://github.com/k3s-io/k3s/pull/6232)
+* Replace deprecated ioutil package [(#6230)](https://github.com/k3s-io/k3s/pull/6230)
+* Fix dualStack test [(#6245)](https://github.com/k3s-io/k3s/pull/6245)
+* Add ServiceAccount for svclb pods [(#6253)](https://github.com/k3s-io/k3s/pull/6253)
+* Update to v1.25.3-k3s1 [(#6269)](https://github.com/k3s-io/k3s/pull/6269)
+* Return ProviderID in URI format [(#6284)](https://github.com/k3s-io/k3s/pull/6284)
+* Corrected CCM RBAC to allow for removal of legacy service finalizer during upgrades. [(#6306)](https://github.com/k3s-io/k3s/pull/6306)
+* Added a new --flannel-external-ip flag. [(#6321)](https://github.com/k3s-io/k3s/pull/6321)
+    * When enabled, Flannel traffic will now use the nodes' external IPs instead of their internal ones.
+    * This is meant for use with distributed clusters that are not all on the same local network.
+
+-----
+## Release [v1.25.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.2+k3s1)
+
+This release updates Kubernetes to v1.25.2, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1250).
+
+### Changes since v1.25.0+k3s1:
+
+* Add k3s v1.25 to the release channel [(#6129)](https://github.com/k3s-io/k3s/pull/6129)
+* Restore original INSTALL_K3S_SKIP_DOWNLOAD behavior [(#6130)](https://github.com/k3s-io/k3s/pull/6130)
+* Add K3S Release Documentation [(#6135)](https://github.com/k3s-io/k3s/pull/6135)
+* Update to v1.25.1 [(#6140)](https://github.com/k3s-io/k3s/pull/6140)
+* Update to v1.25.2-k3s1 [(#6168)](https://github.com/k3s-io/k3s/pull/6168)
+
+-----
+## Release [v1.25.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.25.0+k3s1)
+
+This release is K3s's first in the v1.25 line. It updates Kubernetes to v1.25.0.
+
+Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#urgent-upgrade-notes).
+
+**Important Note:** Kubernetes v1.25 removes the beta `PodSecurityPolicy` admission plugin. Please follow the [upstream documentation](https://kubernetes.io/docs/tasks/configure-pod-container/migrate-from-psp/) to migrate from PSP to the built-in PodSecurity admission plugin, prior to upgrading to v1.25.0+k3s1.
+
+### Changes since v1.24.4+k3s1:
+
+* Update Kubernetes to v1.25.0 [(#6040)](https://github.com/k3s-io/k3s/pull/6040)
+* Remove `--containerd` flag from windows kubelet args [(#6028)](https://github.com/k3s-io/k3s/pull/6028)
+* E2E: Add support for CentOS 7 and Rocky 8 [(#6015)](https://github.com/k3s-io/k3s/pull/6015)
+* Convert install tests to run PR build of k3s [(#6003)](https://github.com/k3s-io/k3s/pull/6003)
+* CI: update Fedora 34 -> 35 [(#5996)](https://github.com/k3s-io/k3s/pull/5996)
+* Fix dualStack test and change ipv6 network prefix [(#6023)](https://github.com/k3s-io/k3s/pull/6023)
+* Fix e2e tests [(#6018)](https://github.com/k3s-io/k3s/pull/6018)
+* Update README.md [(#6048)](https://github.com/k3s-io/k3s/pull/6048)
+* Remove wireguard interfaces when deleting the cluster [(#6055)](https://github.com/k3s-io/k3s/pull/6055)
+* Add validation check to confirm correct golang version for Kubernetes [(#6050)](https://github.com/k3s-io/k3s/pull/6050)
+* Expand startup integration test [(#6030)](https://github.com/k3s-io/k3s/pull/6030)
+* Update go.mod version to 1.19 [(#6049)](https://github.com/k3s-io/k3s/pull/6049)
+* Usage of `--cluster-secret`, `--no-deploy`, and `--no-flannel` is no longer supported. Attempts to use these flags will cause fatal errors. See [the docs](https://k3s-io.github.io/docs/reference/server-config#deprecated-options) for their replacement. [(#6069)](https://github.com/k3s-io/k3s/pull/6069)
+* Update Flannel version to fix older iptables version issue. 
[(#6090)](https://github.com/k3s-io/k3s/pull/6090) +* The bundled version of runc has been bumped to v1.1.4 [(#6071)](https://github.com/k3s-io/k3s/pull/6071) +* The embedded containerd version has been bumped to v1.6.8-k3s1 [(#6078)](https://github.com/k3s-io/k3s/pull/6078) +* Fix deprecation message [(#6112)](https://github.com/k3s-io/k3s/pull/6112) +* Added warning message for flannel backend additional options deprecation [(#6111)](https://github.com/k3s-io/k3s/pull/6111) + +----- diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.26.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.26.X.md new file mode 100644 index 000000000..c29d6e2da --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.26.X.md @@ -0,0 +1,606 @@ +--- +hide_table_of_contents: true +sidebar_position: 7 +--- + +# v1.26.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#urgent-upgrade-notes). +::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.26.15+k3s1](v1.26.X.md#release-v12615k3s1) | Mar 25 2024| [v1.26.15](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v12615) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.26) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.26.14+k3s1](v1.26.X.md#release-v12614k3s1) | Feb 29 2024| [v1.26.14](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v12614) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.26) | [v1.1.12-k3s1](https://github.com/k3s-io/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.26.13+k3s2](v1.26.X.md#release-v12613k3s2) | Feb 06 2024| [v1.26.13](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v12613) | 
[v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.26) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.12+k3s1](v1.26.X.md#release-v12612k3s1) | Dec 27 2023| [v1.26.12](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v12612) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.26) | [v1.1.10](https://github.com/opencontainers/runc/releases/tag/v1.1.10) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.11+k3s2](v1.26.X.md#release-v12611k3s2) | Dec 07 2023| [v1.26.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v12611) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1.26) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.10+k3s2](v1.26.X.md#release-v12610k3s2) | Nov 08 2023| [v1.26.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v12610) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1.26) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | 
[v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.10+k3s1](v1.26.X.md#release-v12610k3s1) | Oct 30 2023| [v1.26.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v12610) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1.26) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.9+k3s1](v1.26.X.md#release-v1269k3s1) | Sep 20 2023| [v1.26.9](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1269) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.6-k3s1.26](https://github.com/k3s-io/containerd/releases/tag/v1.7.6-k3s1.26) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.8+k3s1](v1.26.X.md#release-v1268k3s1) | Sep 05 2023| [v1.26.8](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1268) | [v0.10.2](https://github.com/k3s-io/kine/releases/tag/v0.10.2) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.3-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.3-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.7+k3s1](v1.26.X.md#release-v1267k3s1) | Jul 27 2023| [v1.26.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1267) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | 
[v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.22.0](https://github.com/flannel-io/flannel/releases/tag/v0.22.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.2](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.2) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.6+k3s1](v1.26.X.md#release-v1266k3s1) | Jun 26 2023| [v1.26.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1266) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.22.0](https://github.com/flannel-io/flannel/releases/tag/v0.22.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.5+k3s1](v1.26.X.md#release-v1265k3s1) | May 26 2023| [v1.26.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1265) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.14.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.14.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.4+k3s1](v1.26.X.md#release-v1264k3s1) | Apr 20 2023| [v1.26.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1264) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.6.19-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.19-k3s1) | [v1.1.5](https://github.com/opencontainers/runc/releases/tag/v1.1.5) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.13.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.3) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.26.3+k3s1](v1.26.X.md#release-v1263k3s1) | Mar 27 2023| [v1.26.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1263) | 
[v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.5-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.5-k3s1) | [v1.6.19-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.19-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.26.2+k3s1](v1.26.X.md#release-v1262k3s1) | Mar 10 2023| [v1.26.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1262) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.5-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.5-k3s1) | [v1.6.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.15-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.21.1](https://github.com/flannel-io/flannel/releases/tag/v0.21.1) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.26.1+k3s1](v1.26.X.md#release-v1261k3s1) | Jan 26 2023| [v1.26.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1261) | [v0.9.8](https://github.com/k3s-io/kine/releases/tag/v0.9.8) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.5-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.5-k3s1) | [v1.6.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.15-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2](https://github.com/flannel-io/flannel/releases/tag/v0.20.2) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.26.0+k3s2](v1.26.X.md#release-v1260k3s2) | Jan 11 2023| [v1.26.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1260) | [v0.9.8](https://github.com/k3s-io/kine/releases/tag/v0.9.8) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.5-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.5-k3s1) | [v1.6.14-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.14-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2](https://github.com/flannel-io/flannel/releases/tag/v0.20.2) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | 
[v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | +| [v1.26.0+k3s1](v1.26.X.md#release-v1260k3s1) | Dec 21 2022| [v1.26.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1260) | [v0.9.8](https://github.com/k3s-io/kine/releases/tag/v0.9.8) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.5-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.5-k3s1) | [v1.6.12-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.12-k3s1) | [v1.1.4](https://github.com/opencontainers/runc/releases/tag/v1.1.4) | [v0.20.2](https://github.com/flannel-io/flannel/releases/tag/v0.20.2) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.9.4](https://github.com/coredns/coredns/releases/tag/v1.9.4) | [v0.13.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.1) | [v0.0.23](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.23) | + +
+ +## Release [v1.26.15+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.15+k3s1) + + +This release updates Kubernetes to v1.26.15, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v12614). + +### Changes since v1.26.14+k3s1: + +* Update klipper-lb image version [(#9607)](https://github.com/k3s-io/k3s/pull/9607) +* Install and Unit test backports [(#9645)](https://github.com/k3s-io/k3s/pull/9645) +* Adjust first node-ip based on configured clusterCIDR [(#9633)](https://github.com/k3s-io/k3s/pull/9633) +* Add an integration test for flannel-backend=none [(#9610)](https://github.com/k3s-io/k3s/pull/9610) +* Improve tailscale e2e test [(#9655)](https://github.com/k3s-io/k3s/pull/9655) +* Backports for 2024-03 release cycle [(#9692)](https://github.com/k3s-io/k3s/pull/9692) + * Fix: use correct wasm shims names + * The embedded flannel cni-plugin binary is now built and versioned separate from the rest of the cni plugins and the embedded flannel controller. + * Bump spegel to v0.0.18-k3s3 + * Adds wildcard registry support + * Fixes issue with excessive CPU utilization while waiting for containerd to start + * Add env var to allow spegel mirroring of latest tag + * Tweak netpol node wait logs + * Fix coredns NodeHosts on dual-stack clusters + * Bump helm-controller/klipper-helm versions + * Fix snapshot prune + * Fix issue with etcd node name missing hostname + * Rootless mode should also bind service nodePort to host for LoadBalancer type, matching UX of rootful mode. + * To enable raw output for the `check-config` subcommand, you may now set NO_COLOR=1 + * Fix additional corner cases in registries handling + * Bump metrics-server to v0.7.0 + * K3s will now warn and suppress duplicate entries in the mirror endpoint list for a registry. Containerd does not support listing the same endpoint multiple times as a mirror for a single upstream registry. +* Fix wildcard entry upstream fallback [(#9735)](https://github.com/k3s-io/k3s/pull/9735) +* Update to v1.26.15-k3s1 and Go 1.21.8 [(#9740)](https://github.com/k3s-io/k3s/pull/9740) + +----- +## Release [v1.26.14+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.14+k3s1) + + +This release updates Kubernetes to v1.26.14, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v12613). + +### Changes since v1.26.13+k3s2: + +* Chore: bump Local Path Provisioner version [(#9428)](https://github.com/k3s-io/k3s/pull/9428) +* Bump cri-dockerd to fix compat with Docker Engine 25 [(#9292)](https://github.com/k3s-io/k3s/pull/9292) +* Auto Dependency Bump [(#9421)](https://github.com/k3s-io/k3s/pull/9421) +* Runtimes refactor using exec.LookPath [(#9429)](https://github.com/k3s-io/k3s/pull/9429) + * Directories containing runtimes need to be included in the $PATH environment variable for effective runtime detection. 
+* Changed how lastHeartBeatTime works in the etcd condition [(#9423)](https://github.com/k3s-io/k3s/pull/9423) +* Allow executors to define containerd and docker behavior [(#9252)](https://github.com/k3s-io/k3s/pull/9252) +* Update Kube-router to v2.0.1 [(#9406)](https://github.com/k3s-io/k3s/pull/9406) +* Backports for 2024-02 release cycle [(#9464)](https://github.com/k3s-io/k3s/pull/9464) +* Bump flannel version + remove multiclustercidr [(#9409)](https://github.com/k3s-io/k3s/pull/9409) +* Enable longer http timeout requests [(#9446)](https://github.com/k3s-io/k3s/pull/9446) +* Test_UnitApplyContainerdQoSClassConfigFileIfPresent [(#9442)](https://github.com/k3s-io/k3s/pull/9442) +* Support PR testing installs [(#9471)](https://github.com/k3s-io/k3s/pull/9471) +* Update Kubernetes to v1.26.14 [(#9490)](https://github.com/k3s-io/k3s/pull/9490) +* Fix drone publish for arm [(#9510)](https://github.com/k3s-io/k3s/pull/9510) +* Remove failing Drone step [(#9514)](https://github.com/k3s-io/k3s/pull/9514) +* Restore original order of agent startup functions [(#9547)](https://github.com/k3s-io/k3s/pull/9547) +* Fix netpol startup when flannel is disabled [(#9580)](https://github.com/k3s-io/k3s/pull/9580) + +----- +## Release [v1.26.13+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.26.13+k3s2) + + +This release updates Kubernetes to v1.26.13, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v12612). + +**Important Notes** + +Addresses the runc CVE: [CVE-2024-21626](https://nvd.nist.gov/vuln/detail/CVE-2024-21626) by updating runc to v1.1.12. + +### Changes since v1.26.12+k3s1: + +* Add a retry around updating a secrets-encrypt node annotations [(#9123)](https://github.com/k3s-io/k3s/pull/9123) +* Added support for env *_PROXY variables for agent loadbalancer [(#9116)](https://github.com/k3s-io/k3s/pull/9116) +* Wait for taint to be gone in the node before starting the netpol controller [(#9177)](https://github.com/k3s-io/k3s/pull/9177) +* Etcd condition [(#9183)](https://github.com/k3s-io/k3s/pull/9183) +* Backports for 2024-01 [(#9212)](https://github.com/k3s-io/k3s/pull/9212) +* Move proxy dialer out of init() and fix crash [(#9221)](https://github.com/k3s-io/k3s/pull/9221) +* Pin opa version for missing dependency chain [(#9218)](https://github.com/k3s-io/k3s/pull/9218) +* Etcd node is nil [(#9230)](https://github.com/k3s-io/k3s/pull/9230) +* Update to v1.26.13 and Go 1.20.13 [(#9262)](https://github.com/k3s-io/k3s/pull/9262) +* Use `ipFamilyPolicy: RequireDualStack` for dual-stack kube-dns [(#9271)](https://github.com/k3s-io/k3s/pull/9271) +* Backports for 2024-01 k3s2 [(#9338)](https://github.com/k3s-io/k3s/pull/9338) + * Bump runc to v1.1.12 and helm-controller to v0.15.7 + * Fix handling of bare hostname or IP as endpoint address in registries.yaml +* Bump helm-controller to fix issue with ChartContent [(#9348)](https://github.com/k3s-io/k3s/pull/9348) + +----- +## Release [v1.26.12+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.12+k3s1) + + +This release updates Kubernetes to v1.26.12, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v12611). 

### Changes since v1.26.11+k3s2:

* Runtimes backport [(#9014)](https://github.com/k3s-io/k3s/pull/9014)
    * Added runtime classes for wasm/nvidia/crun
    * Added default runtime flag for containerd
* Bump containerd/runc to v1.7.10-k3s1/v1.1.10 [(#8964)](https://github.com/k3s-io/k3s/pull/8964)
* Fix overlapping address range [(#9019)](https://github.com/k3s-io/k3s/pull/9019)
* Allow setting default-runtime on servers [(#9028)](https://github.com/k3s-io/k3s/pull/9028)
* Bump containerd to v1.7.11 [(#9042)](https://github.com/k3s-io/k3s/pull/9042)
* Update to v1.26.12-k3s1 [(#9077)](https://github.com/k3s-io/k3s/pull/9077)

-----
## Release [v1.26.11+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.26.11+k3s2)


This release updates Kubernetes to v1.26.11, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v12610).

### Changes since v1.26.10+k3s2:

* Etcd status condition [(#8820)](https://github.com/k3s-io/k3s/pull/8820)
* Backports for 2023-11 release [(#8879)](https://github.com/k3s-io/k3s/pull/8879)
    * New timezone info in the Docker image allows the use of `spec.timeZone` in CronJobs
    * Bumped kine to v0.11.0 to resolve issues with postgres and NATS, fix performance of watch channels under heavy load, and improve compatibility with the reference implementation.
    * Containerd may now be configured to use rdt or blockio configuration by defining `rdt_config.yaml` or `blockio_config.yaml` files.
    * Added the agent flag `--disable-apiserver-lb`; when set, the agent will not start the load-balancer proxy.
    * Improved ingress IP ordering from ServiceLB
    * Helm CRD installation is now skipped when `--disable-helm-controller` is set
    * Omit snapshot list configmap entries for snapshots without extra metadata
    * Add jitter to client config retry to avoid hammering servers when they are starting up
* Add warning for removal of multiclustercidr flag [(#8760)](https://github.com/k3s-io/k3s/pull/8760)
* Handle nil pointer when runtime core is not ready in etcd [(#8888)](https://github.com/k3s-io/k3s/pull/8888)
* Improve dualStack log [(#8829)](https://github.com/k3s-io/k3s/pull/8829)
* Bump dynamiclistener; reduce snapshot controller log spew [(#8903)](https://github.com/k3s-io/k3s/pull/8903)
    * Bumped dynamiclistener to address a race condition that could cause a server to fail to sync its certificates into the Kubernetes secret
    * Reduced etcd snapshot log spam during initial cluster startup
* Fix etcd snapshot S3 issues [(#8938)](https://github.com/k3s-io/k3s/pull/8938)
    * Don't apply S3 retention if S3 client failed to initialize
    * Don't request metadata when listing S3 snapshots
    * Print key instead of file path in snapshot metadata log message
* Update to v1.26.11 and Go to 1.20.11 [(#8922)](https://github.com/k3s-io/k3s/pull/8922)
* Remove s390x [(#9000)](https://github.com/k3s-io/k3s/pull/9000)

-----
## Release [v1.26.10+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.26.10+k3s2)


This release updates Kubernetes to v1.26.10, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v12610).
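
After rolling out a patch release like this one, it is worth confirming what each node is actually running. A quick sketch with standard commands:

```bash
# Version of the k3s binary on this node.
k3s --version

# Kubernetes version each node reports to the apiserver;
# the VERSION column should read v1.26.10+k3s2 once the upgrade lands.
kubectl get nodes -o wide
```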

### Changes since v1.26.10+k3s1:

* Fix SystemdCgroup in templates_linux.go [(#8766)](https://github.com/k3s-io/k3s/pull/8766)
    * Fixed an issue with identifying additional container runtimes
* Update traefik chart to v25.0.0 [(#8776)](https://github.com/k3s-io/k3s/pull/8776)
* Update traefik to fix registry value [(#8790)](https://github.com/k3s-io/k3s/pull/8790)

-----
## Release [v1.26.10+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.10+k3s1)


This release updates Kubernetes to v1.26.10, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1269).

### Changes since v1.26.9+k3s1:

* Fix error reporting [(#8412)](https://github.com/k3s-io/k3s/pull/8412)
* Add context to flannel errors [(#8420)](https://github.com/k3s-io/k3s/pull/8420)
* Testing Backports for September [(#8300)](https://github.com/k3s-io/k3s/pull/8300)
* Include the interface name in the error message [(#8436)](https://github.com/k3s-io/k3s/pull/8436)
* Update kube-router [(#8444)](https://github.com/k3s-io/k3s/pull/8444)
* Add extraArgs to tailscale [(#8465)](https://github.com/k3s-io/k3s/pull/8465)
* Added error when cluster reset while using server flag [(#8456)](https://github.com/k3s-io/k3s/pull/8456)
    * The user will now receive an error when using `--cluster-reset` together with the `--server` flag
* Cluster reset from non bootstrap nodes [(#8453)](https://github.com/k3s-io/k3s/pull/8453)
* Fix spellcheck problem [(#8510)](https://github.com/k3s-io/k3s/pull/8510)
* Take IPFamily precedence based on order [(#8505)](https://github.com/k3s-io/k3s/pull/8505)
* Network defaults are duplicated, remove one [(#8552)](https://github.com/k3s-io/k3s/pull/8552)
* Advertise address integration test [(#8517)](https://github.com/k3s-io/k3s/pull/8517)
* System agent push tags fix [(#8570)](https://github.com/k3s-io/k3s/pull/8570)
* Fixed tailscale node IP dualstack mode in case of IPv4-only node [(#8559)](https://github.com/k3s-io/k3s/pull/8559)
* Server Token Rotation [(#8577)](https://github.com/k3s-io/k3s/pull/8577)
    * Users can now rotate the server token using `k3s token rotate -t <old-token> --new-token <new-token>`. After the command succeeds, all server nodes must be restarted with the new token.
* Clear remove annotations on cluster reset [(#8590)](https://github.com/k3s-io/k3s/pull/8590)
    * Fixed an issue that could cause k3s to attempt to remove members from the etcd cluster immediately following a cluster-reset/restore, if they were queued for removal at the time the snapshot was taken.
* Use IPv6 if it is the first configured IP with dualstack [(#8598)](https://github.com/k3s-io/k3s/pull/8598)
* Backports for 2023-10 release [(#8616)](https://github.com/k3s-io/k3s/pull/8616)
* E2E Domain Drone Cleanup [(#8583)](https://github.com/k3s-io/k3s/pull/8583)
* Update kube-router package in build script [(#8635)](https://github.com/k3s-io/k3s/pull/8635)
* Add etcd-only/control-plane-only server test and fix control-plane-only server crash [(#8643)](https://github.com/k3s-io/k3s/pull/8643)
* Use `version.Program` not K3s in token rotate logs [(#8655)](https://github.com/k3s-io/k3s/pull/8655)
* Windows agent support [(#8647)](https://github.com/k3s-io/k3s/pull/8647)
* Add --image-service-endpoint flag (#8279) [(#8663)](https://github.com/k3s-io/k3s/pull/8663)
    * Add `--image-service-endpoint` flag to specify an external image service socket.
* Backport etcd fixes [(#8691)](https://github.com/k3s-io/k3s/pull/8691)
    * Re-enable etcd endpoint auto-sync
    * Manually requeue configmap reconcile when no nodes have reconciled snapshots
* Update to v1.26.10 and Go to v1.20.10 [(#8680)](https://github.com/k3s-io/k3s/pull/8680)
* Fix s3 snapshot restore [(#8734)](https://github.com/k3s-io/k3s/pull/8734)

-----
## Release [v1.26.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.9+k3s1)


This release updates Kubernetes to v1.26.9, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1268).

### Changes since v1.26.8+k3s1:

* Bump kine to v0.10.3 [(#8325)](https://github.com/k3s-io/k3s/pull/8325)
* Update to v1.26.9 and go to v1.20.8 [(#8357)](https://github.com/k3s-io/k3s/pull/8357)
    * Bump embedded containerd to v1.7.6
    * Bump embedded stargz-snapshotter plugin to latest
    * Fixed intermittent drone CI failures due to race conditions in test environment setup scripts
    * Fixed CI failures due to api discovery changes in Kubernetes 1.28

-----
## Release [v1.26.8+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.8+k3s1)

This release updates Kubernetes to v1.26.8, and fixes a number of issues.

:::warning Important
This release includes support for remediating CVE-2023-32187, a potential Denial of Service attack vector on K3s servers. See https://github.com/k3s-io/k3s/security/advisories/GHSA-m4hf-6vgr-75r2 for more information, including mandatory steps necessary to harden clusters against this vulnerability.
:::

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1267).

### Changes since v1.26.7+k3s1:

* Update flannel and plugins [(#8075)](https://github.com/k3s-io/k3s/pull/8075)
* Fix tailscale bug with ip modes [(#8097)](https://github.com/k3s-io/k3s/pull/8097)
* Etcd snapshots retention when node name changes [(#8122)](https://github.com/k3s-io/k3s/pull/8122)
* August Test Backports [(#8126)](https://github.com/k3s-io/k3s/pull/8126)
* Backports for 2023-08 release [(#8129)](https://github.com/k3s-io/k3s/pull/8129)
    * K3s's external apiserver listener now declines to add to its certificate any subject names not associated with the kubernetes apiserver service, server nodes, or values of the --tls-san option. This prevents the certificate's SAN list from being filled with unwanted entries.
    * K3s no longer enables the apiserver's `enable-aggregator-routing` flag when the egress proxy is not being used to route connections to in-cluster endpoints.
    * Updated the embedded containerd to v1.7.3+k3s1
    * Updated the embedded runc to v1.1.8
    * Updated the embedded etcd to v3.5.9+k3s1
    * User-provided containerd config templates may now use `{{ template "base" . }}` to include the default K3s template content. This makes it easier to maintain user configuration if the only need is to add additional sections to the file.
    * Bump docker/docker module version to fix issues with cri-dockerd caused by recent releases of golang rejecting invalid host headers sent by the docker client.
    * Updated kine to v0.10.2
* K3s etcd-snapshot delete fails to delete local file when called with s3 flag [(#8144)](https://github.com/k3s-io/k3s/pull/8144)
* Fix for cluster-reset backup from s3 when etcd snapshots are disabled [(#8170)](https://github.com/k3s-io/k3s/pull/8170)
* Fixed the etcd retention to delete orphaned snapshots based on the date [(#8189)](https://github.com/k3s-io/k3s/pull/8189)
* Additional backports for 2023-08 release [(#8212)](https://github.com/k3s-io/k3s/pull/8212)
    * The version of `helm` used by the bundled helm controller's job image has been updated to v3.12.3
    * Bumped dynamiclistener to address an issue that could cause the apiserver/supervisor listener on 6443 to stop serving requests on etcd-only nodes.
    * The K3s external apiserver/supervisor listener on 6443 now sends a complete certificate chain in the TLS handshake.
* Move flannel to 0.22.2 [(#8222)](https://github.com/k3s-io/k3s/pull/8222)
* Update to v1.26.8 [(#8235)](https://github.com/k3s-io/k3s/pull/8235)
* Add new CLI flag to enable TLS SAN CN filtering [(#8258)](https://github.com/k3s-io/k3s/pull/8258)
    * Added a new `--tls-san-security` option. This flag defaults to false, but can be set to true to disable automatically adding SANs to the server's TLS certificate to satisfy any hostname requested by a client.
* Add RWMutex to address controller [(#8274)](https://github.com/k3s-io/k3s/pull/8274)

-----
## Release [v1.26.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.7+k3s1)


This release updates Kubernetes to v1.26.7, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1266).

### Changes since v1.26.6+k3s1:

* Remove file_windows.go [(#7855)](https://github.com/k3s-io/k3s/pull/7855)
* Fix code spell check [(#7859)](https://github.com/k3s-io/k3s/pull/7859)
* Allow k3s to customize apiServerPort on helm-controller [(#7874)](https://github.com/k3s-io/k3s/pull/7874)
* Check if we are on ipv4, ipv6 or dualStack when doing tailscale [(#7882)](https://github.com/k3s-io/k3s/pull/7882)
* Support setting control server URL for Tailscale. [(#7893)](https://github.com/k3s-io/k3s/pull/7893)
* S3 and Startup tests [(#7885)](https://github.com/k3s-io/k3s/pull/7885)
* Fix rootless node password [(#7901)](https://github.com/k3s-io/k3s/pull/7901)
* Backports for 2023-07 release [(#7908)](https://github.com/k3s-io/k3s/pull/7908)
    * Resolved an issue that caused agents joined with kubeadm-style bootstrap tokens to fail to rejoin the cluster when their node object is deleted.
    * The `k3s certificate rotate-ca` command now supports the data-dir flag.
* Adding cli to custom klipper helm image [(#7914)](https://github.com/k3s-io/k3s/pull/7914)
    * The default helm-controller job image can now be overridden with the --helm-job-image CLI flag
* Generation of certs and keys for etcd gated if etcd is disabled [(#7944)](https://github.com/k3s-io/k3s/pull/7944)
* Don't use zgrep in `check-config` if apparmor profile is enforced [(#7956)](https://github.com/k3s-io/k3s/pull/7956)
* Fix image_scan.sh script and download trivy version (#7950) [(#7968)](https://github.com/k3s-io/k3s/pull/7968)
* Adjust default kubeconfig file permissions [(#7983)](https://github.com/k3s-io/k3s/pull/7983)
* Update to v1.26.7 [(#8022)](https://github.com/k3s-io/k3s/pull/8022)

-----
## Release [v1.26.6+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.6+k3s1)

This release updates Kubernetes to v1.26.6, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1265).

### Changes since v1.26.5+k3s1:

* Update flannel version [(#7648)](https://github.com/k3s-io/k3s/pull/7648)
* Bump vagrant libvirt with fix for plugin installs [(#7658)](https://github.com/k3s-io/k3s/pull/7658)
* E2E and Dep Backports - June [(#7693)](https://github.com/k3s-io/k3s/pull/7693)
    * Bump docker go.mod #7681
    * Shortcircuit commands with version or help flags #7683
    * Add Rotation certification Check, remove func to restart agents #7097
    * E2E: Sudo for RunCmdOnNode #7686
* VPN integration [(#7727)](https://github.com/k3s-io/k3s/pull/7727)
* E2e: Private registry test [(#7721)](https://github.com/k3s-io/k3s/pull/7721)
* Fix spelling check [(#7751)](https://github.com/k3s-io/k3s/pull/7751)
* Remove unused libvirt config [(#7757)](https://github.com/k3s-io/k3s/pull/7757)
* Backport version bumps and bugfixes [(#7717)](https://github.com/k3s-io/k3s/pull/7717)
    * The bundled metrics-server has been bumped to v0.6.3, and now uses only secure TLS ciphers by default.
    * The `coredns-custom` ConfigMap now allows for `*.override` sections to be included in the `.:53` default server block.
    * The K3s core controllers (supervisor, deploy, and helm) no longer use the admin kubeconfig. This makes it easier to determine from access and audit logs which actions are performed by the system, and which are performed by an administrative user.
    * Bumped klipper-lb image to v0.4.4 to resolve an issue that prevented access to ServiceLB ports from localhost when the Service ExternalTrafficPolicy was set to Local.
    * Make LB image configurable when compiling k3s
    * K3s now allows nodes to join the cluster even if the node password secret cannot be created at the time the node joins. The secret create will be retried in the background. This resolves a potential deadlock created by fail-closed validating webhooks that block secret creation, where the webhook is unavailable until new nodes join the cluster to run the webhook pod.
    * The bundled containerd's aufs/devmapper/zfs snapshotter plugins have been restored. These were unintentionally omitted when moving containerd back into the k3s multicall binary in the previous release.
    * The embedded helm controller has been bumped to v0.15.0, and now supports creating the chart's target namespace if it does not exist.
* Add format command on makefile [(#7762)](https://github.com/k3s-io/k3s/pull/7762)
* Fix logging and cleanup in Tailscale [(#7782)](https://github.com/k3s-io/k3s/pull/7782)
* Update Kubernetes to v1.26.6 [(#7789)](https://github.com/k3s-io/k3s/pull/7789)

-----
## Release [v1.26.5+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.5+k3s1)

This release updates Kubernetes to v1.26.5, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1264).

### Changes since v1.26.4+k3s1:

* Ensure that klog verbosity is set to the same level as logrus [(#7360)](https://github.com/k3s-io/k3s/pull/7360)
* Prepend release branch to dependabot [(#7374)](https://github.com/k3s-io/k3s/pull/7374)
* Add integration tests for etc-snapshot server flags [(#7377)](https://github.com/k3s-io/k3s/pull/7377)
* Bump Runc and Containerd [(#7399)](https://github.com/k3s-io/k3s/pull/7399)
* CLI + Config Enhancement [(#7403)](https://github.com/k3s-io/k3s/pull/7403)
    * `--tls-sans` now accepts multiple arguments: `--tls-sans="foo,bar"`
    * `prefer-bundled-bin: true` now works properly when set in `config.yaml.d` files
* Migrate netutil methods into /utils/net.go [(#7432)](https://github.com/k3s-io/k3s/pull/7432)
* Bump kube-router version to fix a bug when a port name is used [(#7460)](https://github.com/k3s-io/k3s/pull/7460)
* Kube flags and longhorn storage tests [(#7465)](https://github.com/k3s-io/k3s/pull/7465)
* Local-storage: Fix permission [(#7474)](https://github.com/k3s-io/k3s/pull/7474)
* Bump containerd to v1.7.0 and move back into multicall binary [(#7444)](https://github.com/k3s-io/k3s/pull/7444)
    * The embedded containerd version has been bumped to `v1.7.0-k3s1`, and has been reintegrated into the main k3s binary for a significant savings in release artifact size.
* Backport version bumps and bugfixes [(#7514)](https://github.com/k3s-io/k3s/pull/7514)
    * K3s now retries the cluster join operation when receiving a "too many learners" error from etcd. This most frequently occurred when attempting to add multiple servers at the same time.
    * K3s once again supports aarch64 nodes with page size > 4k
    * The packaged Traefik version has been bumped to v2.9.10 / chart 21.2.0
    * K3s now prints a more meaningful error when attempting to run from a filesystem mounted `noexec`.
    * K3s now exits with a proper error message when the server token uses a bootstrap token `id.secret` format.
    * Fixed an issue where Addon, HelmChart, and HelmChartConfig CRDs were created without structural schema, allowing the creation of custom resources of these types with invalid content.
    * Servers started with the (experimental) --disable-agent flag no longer attempt to run the tunnel authorizer agent component.
    * Fixed a regression that prevented the pod and cluster egress-selector modes from working properly.
    * K3s now correctly passes through etcd-args to the temporary etcd that is used to extract cluster bootstrap data when restarting managed etcd nodes.
    * K3s now properly handles errors obtaining the current etcd cluster member list when a new server is joining the managed etcd cluster.
    * The embedded kine version has been bumped to v0.10.1. This replaces the legacy `lib/pq` postgres driver with `pgx`.
    * The bundled CNI plugins have been upgraded to v1.2.0-k3s1. The bandwidth and firewall plugins are now included in the bundle.
+ * The embedded Helm controller now supports authenticating to chart repositories via credentials stored in a Secret, as well as passing repo CAs via ConfigMap. +* Bump containerd/runc to v1.7.1-k3s1/v1.1.7 [(#7534)](https://github.com/k3s-io/k3s/pull/7534) + * The bundled containerd and runc versions have been bumped to v1.7.1-k3s1/v1.1.7 +* Wrap error stating that it is coming from netpol [(#7547)](https://github.com/k3s-io/k3s/pull/7547) +* Add '-all' flag to apply to inactive units [(#7573)](https://github.com/k3s-io/k3s/pull/7573) +* Update to v1.26.5-k3s1 [(#7576)](https://github.com/k3s-io/k3s/pull/7576) +* Pin emicklei/go-restful to v3.9.0 [(#7598)](https://github.com/k3s-io/k3s/pull/7598) + +----- +## Release [v1.26.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.4+k3s1) + +This release updates Kubernetes to v1.26.4, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1263). + +### Changes since v1.26.3+k3s1: + +* Enhance `k3s check-config` [(#7091)](https://github.com/k3s-io/k3s/pull/7091) +* Update stable channel to v1.25.8+k3s1 [(#7161)](https://github.com/k3s-io/k3s/pull/7161) +* Drone Pipelines enhancement [(#7169)](https://github.com/k3s-io/k3s/pull/7169) +* Fix_get_sha_url [(#7187)](https://github.com/k3s-io/k3s/pull/7187) +* Improve Updatecli local-path-provisioner pipeline [(#7181)](https://github.com/k3s-io/k3s/pull/7181) +* Improve workflow [(#7142)](https://github.com/k3s-io/k3s/pull/7142) +* Improve Trivy configuration [(#7154)](https://github.com/k3s-io/k3s/pull/7154) +* Bump Local Path Provisioner version [(#7167)](https://github.com/k3s-io/k3s/pull/7167) + * The bundled local-path-provisioner version has been bumped to v0.0.24 +* Bump etcd to v3.5.7 [(#7170)](https://github.com/k3s-io/k3s/pull/7170) + * The embedded etcd version has been bumped to v3.5.7 +* Bump runc to v1.1.5 [(#7171)](https://github.com/k3s-io/k3s/pull/7171) + * The bundled runc version has been bumped to v1.1.5 +* Fix race condition caused by etcd advertising addresses that it does not listen on [(#7147)](https://github.com/k3s-io/k3s/pull/7147) + * Fixed a race condition during cluster reset that could cause the operation to hang and time out. +* Bump coredns to v1.10.1 [(#7168)](https://github.com/k3s-io/k3s/pull/7168) + * The bundled coredns version has been bumped to v1.10.1 +* Don't apply hardened args to agent [(#7089)](https://github.com/k3s-io/k3s/pull/7089) +* Upgrade helm-controller to v0.13.3 [(#7209)](https://github.com/k3s-io/k3s/pull/7209) +* Improve Klipper Helm and Helm controller bumps [(#7146)](https://github.com/k3s-io/k3s/pull/7146) +* Fix issue with stale connections to removed LB server [(#7194)](https://github.com/k3s-io/k3s/pull/7194) + * The client load-balancer that maintains connections to active server nodes now closes connections to servers when they are removed from the cluster. This ensures that agent components immediately reconnect to a current cluster member. +* Bump actions/setup-go from 3 to 4 [(#7111)](https://github.com/k3s-io/k3s/pull/7111) +* Lock bootstrap data with empty key to prevent conflicts [(#7215)](https://github.com/k3s-io/k3s/pull/7215) + * When using an external datastore, K3s now locks the bootstrap key while creating initial cluster bootstrap data, preventing a race condition when multiple servers attempted to initialize the cluster simultaneously. 
* Updated kube-router to move the default ACCEPT rule to the end of the chain [(#7218)](https://github.com/k3s-io/k3s/pull/7218)
    * The embedded kube-router controller has been updated to fix a regression that caused traffic from pods to be blocked by any default drop/deny rules present on the host. Users should still confirm that any externally-managed firewall rules explicitly allow traffic to/from pod and service networks, but this returns the old behavior that was relied upon by some users.
* Add make commands to terraform automation and fix external dbs related issue [(#7159)](https://github.com/k3s-io/k3s/pull/7159)
* Update klipper lb to v0.4.2 [(#7210)](https://github.com/k3s-io/k3s/pull/7210)
* Add coreos and sle micro to selinux support [(#6945)](https://github.com/k3s-io/k3s/pull/6945)
* Fix call for k3s-selinux versions in airgapped environments [(#7264)](https://github.com/k3s-io/k3s/pull/7264)
* Update Kube-router ACCEPT rule insertion and install script to clean rules before start [(#7274)](https://github.com/k3s-io/k3s/pull/7274)
    * The embedded kube-router controller has been updated to fix a regression that caused traffic from pods to be blocked by any default drop/deny rules present on the host. Users should still confirm that any externally-managed firewall rules explicitly allow traffic to/from pod and service networks, but this returns the old behavior that was relied upon by some users.
* Update to v1.26.4-k3s1 [(#7282)](https://github.com/k3s-io/k3s/pull/7282)
* Bump golang:alpine image version [(#7292)](https://github.com/k3s-io/k3s/pull/7292)
* Bump Sonobuoy version [(#7256)](https://github.com/k3s-io/k3s/pull/7256)
* Bump Trivy version [(#7257)](https://github.com/k3s-io/k3s/pull/7257)

-----
## Release [v1.26.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.3+k3s1)

This release updates Kubernetes to v1.26.3, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1262).

### Changes since v1.26.2+k3s1:

* Add E2E to Drone [(#6890)](https://github.com/k3s-io/k3s/pull/6890)
* Add flannel adr [(#6973)](https://github.com/k3s-io/k3s/pull/6973)
* Update flannel and kube-router [(#7039)](https://github.com/k3s-io/k3s/pull/7039)
* Bump various dependencies for CVEs [(#7044)](https://github.com/k3s-io/k3s/pull/7044)
* Adds a warning about editing the containerd config.toml file [(#7057)](https://github.com/k3s-io/k3s/pull/7057)
* Update stable version in channel server [(#7066)](https://github.com/k3s-io/k3s/pull/7066)
* Wait for kubelet port to be ready before setting [(#7041)](https://github.com/k3s-io/k3s/pull/7041)
    * The agent tunnel authorizer now waits for the kubelet to be ready before reading the kubelet port from the node object.
* Improve support for rotating the default self-signed certs [(#7032)](https://github.com/k3s-io/k3s/pull/7032)
    * The `k3s certificate rotate-ca` checks now support rotating self-signed certificates without the `--force` option.
* Skip all pipelines based on what is in the PR [(#6996)](https://github.com/k3s-io/k3s/pull/6996)
* Add missing kernel config checks [(#6946)](https://github.com/k3s-io/k3s/pull/6946)
* Remove deprecated nodeSelector label beta.kubernetes.io/os [(#6970)](https://github.com/k3s-io/k3s/pull/6970)
* MultiClusterCIDR for v1.26 [(#6885)](https://github.com/k3s-io/k3s/pull/6885)
    * MultiClusterCIDR feature
* Remove Nikolai from MAINTAINERS list [(#7088)](https://github.com/k3s-io/k3s/pull/7088)
* Add automation for Restart command for K3s [(#7002)](https://github.com/k3s-io/k3s/pull/7002)
* Fix to Rotate CA e2e test [(#7101)](https://github.com/k3s-io/k3s/pull/7101)
* Drone: Cleanup E2E VMs on test panic [(#7104)](https://github.com/k3s-io/k3s/pull/7104)
* Update to v1.26.3-k3s1 [(#7108)](https://github.com/k3s-io/k3s/pull/7108)
* Pin golangci-lint version to v1.51.2 [(#7113)](https://github.com/k3s-io/k3s/pull/7113)
* Clean E2E VMs before testing [(#7109)](https://github.com/k3s-io/k3s/pull/7109)
* Update flannel to fix NAT issue with old iptables version [(#7136)](https://github.com/k3s-io/k3s/pull/7136)

-----
## Release [v1.26.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.2+k3s1)

This release updates Kubernetes to v1.26.2, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1261).

### Changes since v1.26.1+k3s1:

* Add build tag to disable cri-dockerd [(#6760)](https://github.com/k3s-io/k3s/pull/6760)
* Bump cri-dockerd [(#6797)](https://github.com/k3s-io/k3s/pull/6797)
    * The embedded cri-dockerd has been updated to v0.3.1
* Update stable channel to v1.25.6+k3s1 [(#6828)](https://github.com/k3s-io/k3s/pull/6828)
* E2E Rancher and Hardened script improvements [(#6778)](https://github.com/k3s-io/k3s/pull/6778)
* Add Ayedo to Adopters [(#6801)](https://github.com/k3s-io/k3s/pull/6801)
* Consolidate E2E tests and GH Actions [(#6772)](https://github.com/k3s-io/k3s/pull/6772)
* Allow ServiceLB to honor `ExternalTrafficPolicy=Local` [(#6726)](https://github.com/k3s-io/k3s/pull/6726)
    * ServiceLB now honors the Service's ExternalTrafficPolicy. When set to Local, the LoadBalancer will only advertise addresses of Nodes with a Pod for the Service, and will not forward traffic to other cluster members.
* Fix cronjob example [(#6707)](https://github.com/k3s-io/k3s/pull/6707)
* Bump vagrant boxes to fedora37 [(#6832)](https://github.com/k3s-io/k3s/pull/6832)
* Ensure flag type consistency [(#6852)](https://github.com/k3s-io/k3s/pull/6852)
* E2E: Consolidate docker and prefer bundled tests into new startup test [(#6851)](https://github.com/k3s-io/k3s/pull/6851)
* Fix reference to documentation [(#6860)](https://github.com/k3s-io/k3s/pull/6860)
* Bump deps: trivy, sonobuoy, dapper, golangci-lint, gopls [(#6807)](https://github.com/k3s-io/k3s/pull/6807)
* Fix check for (open)SUSE version [(#6791)](https://github.com/k3s-io/k3s/pull/6791)
* Add support for user-provided CA certificates [(#6615)](https://github.com/k3s-io/k3s/pull/6615)
    * K3s now functions properly when the cluster CA certificates are signed by an existing root or intermediate CA. You can find a sample script for generating such certificates before K3s starts in the github repo at [contrib/util/certs.sh](https://github.com/k3s-io/k3s/blob/master/contrib/util/certs.sh).
* Ignore value conflicts when reencrypting secrets [(#6850)](https://github.com/k3s-io/k3s/pull/6850)
* Add `kubeadm` style bootstrap token secret support [(#6663)](https://github.com/k3s-io/k3s/pull/6663)
    * K3s now supports `kubeadm` style join tokens. `k3s token create` now creates join token secrets, optionally with a limited TTL.
    * K3s agents joined with an expired or deleted token stay in the cluster using existing client certificates via the NodeAuthorization admission plugin, unless their Node object is deleted from the cluster.
* Add NATS to the list of supported data stores [(#6876)](https://github.com/k3s-io/k3s/pull/6876)
* Use default address family when adding kubernetes service address to SAN list [(#6857)](https://github.com/k3s-io/k3s/pull/6857)
    * The apiserver advertised address and IP SAN entry are now set correctly on clusters that use IPv6 as the default IP family.
* Fix issue with servicelb startup failure when validating webhooks block creation [(#6911)](https://github.com/k3s-io/k3s/pull/6911)
    * The embedded cloud controller manager will no longer attempt to unconditionally re-create its namespace and serviceaccount on startup. This resolves an issue that could cause a deadlocked cluster when fail-closed webhooks are in use.
* Fix access to hostNetwork port on NodeIP when egress-selector-mode=agent [(#6829)](https://github.com/k3s-io/k3s/pull/6829)
    * Fixed an issue that would cause the apiserver egress proxy to attempt to use the agent tunnel to connect to service endpoints even in agent or disabled mode.
* Wait for server to become ready before creating token [(#6932)](https://github.com/k3s-io/k3s/pull/6932)
* Allow for multiple sets of leader-elected controllers [(#6922)](https://github.com/k3s-io/k3s/pull/6922)
    * Fixed an issue where leader-elected controllers for managed etcd did not run on etcd-only nodes
* Update Flannel to v0.21.1 [(#6944)](https://github.com/k3s-io/k3s/pull/6944)
* Fix Nightly E2E tests [(#6950)](https://github.com/k3s-io/k3s/pull/6950)
* Fix etcd and ca-cert rotate issues [(#6952)](https://github.com/k3s-io/k3s/pull/6952)
* Fix ServiceLB dual-stack ingress IP listing [(#6979)](https://github.com/k3s-io/k3s/pull/6979)
    * Resolved an issue with ServiceLB that would cause it to advertise node IPv6 addresses, even if the cluster or service was not enabled for dual-stack operation.
* Bump kine to v0.9.9 [(#6974)](https://github.com/k3s-io/k3s/pull/6974)
    * The embedded kine version has been bumped to v0.9.9. Compaction log messages are now emitted at `info` level for increased visibility.
* Update to v1.26.2-k3s1 [(#7011)](https://github.com/k3s-io/k3s/pull/7011)

-----
## Release [v1.26.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.1+k3s1)

This release updates Kubernetes to v1.26.1, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1260).

### Changes since v1.26.0+k3s2:

* Add jitter to scheduled snapshots and retry harder on conflicts [(#6715)](https://github.com/k3s-io/k3s/pull/6715)
    * Scheduled etcd snapshots are now offset by a short random delay of up to several seconds. This should prevent multi-server clusters from executing pathological behavior when attempting to simultaneously update the snapshot list ConfigMap. The snapshot controller will also be more persistent in attempting to update the snapshot list.
+* Adjust e2e test run script and fixes [(#6718)](https://github.com/k3s-io/k3s/pull/6718) +* RIP Codespell [(#6701)](https://github.com/k3s-io/k3s/pull/6701) +* Bump alpine from 3.16 to 3.17 in /package [(#6688)](https://github.com/k3s-io/k3s/pull/6688) +* Bump alpine from 3.16 to 3.17 in /conformance [(#6687)](https://github.com/k3s-io/k3s/pull/6687) +* Bump containerd to v1.6.15-k3s1 [(#6722)](https://github.com/k3s-io/k3s/pull/6722) + * The embedded containerd version has been bumped to v1.6.15-k3s1 +* Containerd restart testlet [(#6696)](https://github.com/k3s-io/k3s/pull/6696) +* Bump ubuntu from 20.04 to 22.04 in /tests/e2e/scripts [(#6686)](https://github.com/k3s-io/k3s/pull/6686) +* Add explicit read permissions to workflows [(#6700)](https://github.com/k3s-io/k3s/pull/6700) +* Pass through default tls-cipher-suites [(#6725)](https://github.com/k3s-io/k3s/pull/6725) + * The K3s default cipher suites are now explicitly passed in to kube-apiserver, ensuring that all listeners use these values. +* Bump golang:alpine image version [(#6683)](https://github.com/k3s-io/k3s/pull/6683) +* Bugfix: do not break cert-manager when pprof is enabled [(#6635)](https://github.com/k3s-io/k3s/pull/6635) +* Fix CI tests on Alpine 3.17 [(#6744)](https://github.com/k3s-io/k3s/pull/6744) +* Update Stable to 1.25.5+k3s2 [(#6753)](https://github.com/k3s-io/k3s/pull/6753) +* Bump action/download-artifact to v3 [(#6746)](https://github.com/k3s-io/k3s/pull/6746) +* Generate report and upload test results [(#6737)](https://github.com/k3s-io/k3s/pull/6737) +* Slow dependency CI to weekly [(#6764)](https://github.com/k3s-io/k3s/pull/6764) +* Fix Drone plugins/docker tag for 32 bit arm [(#6769)](https://github.com/k3s-io/k3s/pull/6769) +* Update to v1.26.1-k3s1 [(#6774)](https://github.com/k3s-io/k3s/pull/6774) + +----- +## Release [v1.26.0+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.26.0+k3s2) + + +This release updates containerd to v1.6.14 to resolve an issue where pods would lose their CNI information when containerd was restarted, as well as a number of other stability and administrative changes. + +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#urgent-upgrade-notes). + +### Changes since v1.26.0+k3s1: + +* Current status badges [(#6653)](https://github.com/k3s-io/k3s/pull/6653) +* Add initial Updatecli ADR automation [(#6583)](https://github.com/k3s-io/k3s/pull/6583) +* December 2022 channels update [(#6618)](https://github.com/k3s-io/k3s/pull/6618) +* Change Updatecli GH action reference branch [(#6682)](https://github.com/k3s-io/k3s/pull/6682) +* Fix OpenRC init script error 'openrc-run.sh: source: not found' [(#6614)](https://github.com/k3s-io/k3s/pull/6614) +* Add Dependabot config for security ADR [(#6560)](https://github.com/k3s-io/k3s/pull/6560) +* Bump containerd to v1.6.14-k3s1 [(#6693)](https://github.com/k3s-io/k3s/pull/6693) + * The embedded containerd version has been bumped to v1.6.14-k3s1. This includes a backported fix for [containerd/7843](https://github.com/containerd/containerd/issues/7843) which caused pods to lose their CNI info when containerd was restarted, which in turn caused the kubelet to recreate the pod. 
* Exclude December r1 releases from channel server [(#6706)](https://github.com/k3s-io/k3s/pull/6706)

-----
## Release [v1.26.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.26.0+k3s1)


> ## ⚠️ WARNING
> This release is affected by https://github.com/containerd/containerd/issues/7843, which causes the kubelet to restart all pods whenever K3s is restarted. For this reason, we have removed this K3s release from the channel server. Please use `v1.26.0+k3s2` instead.

This release is K3s's first in the v1.26 line. This release updates Kubernetes to v1.26.0.

Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#urgent-upgrade-notes).

### Changes since v1.25.5+k3s1:

* Remove deprecated flags in v1.26 [(#6574)](https://github.com/k3s-io/k3s/pull/6574)
* Using "etcd-snapshot" for saving snapshots is now deprecated; use "etcd-snapshot save" instead. [(#6575)](https://github.com/k3s-io/k3s/pull/6575)
* Update to v1.26.0-k3s1
    * Update kubernetes to v1.26.0-k3s1
    * Update cri-tools to v1.26.0-rc.0-k3s1
    * Update helm controller to v0.13.1
    * Update etcd to v3.5.5-k3s1
    * Update cri-dockerd to the latest 1.26.0
    * Update cadvisor
    * Update containerd to v1.6.12-k3s1 [(#6370)](https://github.com/k3s-io/k3s/pull/6370)
* Preload iptable_filter/ip6table_filter [(#6645)](https://github.com/k3s-io/k3s/pull/6645)
* Bump k3s-root version to v0.12.1 [(#6651)](https://github.com/k3s-io/k3s/pull/6651)

-----
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.27.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.27.X.md
new file mode 100644
index 000000000..f0f711721
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.27.X.md
@@ -0,0 +1,629 @@
+---
+hide_table_of_contents: true
+sidebar_position: 6
+---
+
+# v1.27.X
+
+:::warning Upgrade Notice
+Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#urgent-upgrade-notes).
+::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.27.16+k3s1](v1.27.X.md#release-v12716k3s1) | Jul 31 2024| [v1.27.16](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12716) | [v0.11.11](https://github.com/k3s-io/kine/releases/tag/v0.11.11) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s2.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s2.27) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.27.15+k3s2](v1.27.X.md#release-v12715k3s2) | Jul 03 2024| [v1.27.15](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12715) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s2.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s2.27) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) | +| [v1.27.15+k3s1](v1.27.X.md#release-v12715k3s1) | Jun 25 2024| [v1.27.15](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12715) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s2.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s2.27) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.2](https://github.com/flannel-io/flannel/releases/tag/v0.25.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) | +| [v1.27.14+k3s1](v1.27.X.md#release-v12714k3s1) | May 22 2024| [v1.27.14](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12714) | [v0.11.7](https://github.com/k3s-io/kine/releases/tag/v0.11.7) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | 
[v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1.27) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.27.13+k3s1](v1.27.X.md#release-v12713k3s1) | Apr 25 2024| [v1.27.13](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12713) | [v0.11.7](https://github.com/k3s-io/kine/releases/tag/v0.11.7) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1.27) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.27.12+k3s1](v1.27.X.md#release-v12712k3s1) | Mar 25 2024| [v1.27.12](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12712) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.27) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.27.11+k3s1](v1.27.X.md#release-v12711k3s1) | Feb 29 2024| [v1.27.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12711) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.27) | [v1.1.12-k3s1](https://github.com/k3s-io/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | 
[v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.27.10+k3s2](v1.27.X.md#release-v12710k3s2) | Feb 06 2024| [v1.27.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12710) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.27) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.9+k3s1](v1.27.X.md#release-v1279k3s1) | Dec 27 2023| [v1.27.9](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1279) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2.27) | [v1.1.10](https://github.com/opencontainers/runc/releases/tag/v1.1.10) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.8+k3s2](v1.27.X.md#release-v1278k3s2) | Dec 07 2023| [v1.27.8](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1278) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1.27) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.7+k3s2](v1.27.X.md#release-v1277k3s2) | Nov 08 2023| [v1.27.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1277) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1.27) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | 
[v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.7+k3s1](v1.27.X.md#release-v1277k3s1) | Oct 30 2023| [v1.27.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1277) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1.27) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.6+k3s1](v1.27.X.md#release-v1276k3s1) | Sep 20 2023| [v1.27.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1276) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.6-k3s1.27](https://github.com/k3s-io/containerd/releases/tag/v1.7.6-k3s1.27) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.5+k3s1](v1.27.X.md#release-v1275k3s1) | Sep 05 2023| [v1.27.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1275) | [v0.10.2](https://github.com/k3s-io/kine/releases/tag/v0.10.2) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.3-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.3-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.4+k3s1](v1.27.X.md#release-v1274k3s1) | Jul 27 2023| [v1.27.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1274) | 
[v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.22.0](https://github.com/flannel-io/flannel/releases/tag/v0.22.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.2](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.2) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.3+k3s1](v1.27.X.md#release-v1273k3s1) | Jun 26 2023| [v1.27.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1273) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.22.0](https://github.com/flannel-io/flannel/releases/tag/v0.22.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.2+k3s1](v1.27.X.md#release-v1272k3s1) | May 26 2023| [v1.27.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1272) | [v0.10.1](https://github.com/k3s-io/kine/releases/tag/v0.10.1) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.7.1-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.1-k3s1) | [v1.1.7](https://github.com/opencontainers/runc/releases/tag/v1.1.7) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.14.0](https://github.com/k3s-io/helm-controller/releases/tag/v0.14.0) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.27.1+k3s1](v1.27.X.md#release-v1271k3s1) | Apr 27 2023| [v1.27.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v1271) | [v0.9.9](https://github.com/k3s-io/kine/releases/tag/v0.9.9) | [3.39.2](https://sqlite.org/releaselog/3_39_2.html) | [v3.5.7-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.7-k3s1) | [v1.6.19-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.6.19-k3s1) | [v1.1.5](https://github.com/opencontainers/runc/releases/tag/v1.1.5) | [v0.21.4](https://github.com/flannel-io/flannel/releases/tag/v0.21.4) | [v0.6.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.2) | [v2.9.4](https://github.com/traefik/traefik/releases/tag/v2.9.4) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | 
[v0.13.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.13.3) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | + +
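
The embedded components in the table above (kine, etcd, containerd, flannel) are compiled into the k3s binary itself, but the packaged addons run as pods, so their versions can be cross-checked on a live cluster. A sketch using only kubectl:

```bash
# Print each kube-system pod with its image tags to verify the packaged
# components (traefik, coredns, metrics-server, local-path-provisioner).
kubectl -n kube-system get pods \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'
```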

## Release [v1.27.16+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.16+k3s1)


This release updates Kubernetes to v1.27.16, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v12715).

### Changes since v1.27.15+k3s2:

* Backports for 2024-07 release cycle [(#10500)](https://github.com/k3s-io/k3s/pull/10500)
    * Bump k3s-root to v0.14.0
    * Bump github.com/hashicorp/go-retryablehttp from 0.7.4 to 0.7.7
    * Bump Local Path Provisioner version
    * Ensure remotedialer kubelet connections use kubelet bind address
    * Chore: Bump Trivy version
    * Add etcd s3 config secret implementation
* July Test Backports [(#10510)](https://github.com/k3s-io/k3s/pull/10510)
* Update to v1.27.16-k3s1 and Go 1.22.5 [(#10542)](https://github.com/k3s-io/k3s/pull/10542)
* Fix issues loading data-dir value from env vars or dropping config files [(#10599)](https://github.com/k3s-io/k3s/pull/10599)

-----
## Release [v1.27.15+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.27.15+k3s2)


This release updates Kubernetes to v1.27.15, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v12715).

### Changes since v1.27.15+k3s1:

* Update flannel to v0.25.4 and fixed issue with IPv6 mask [(#10429)](https://github.com/k3s-io/k3s/pull/10429)

-----
## Release [v1.27.15+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.15+k3s1)


This release updates Kubernetes to v1.27.15, and fixes a number of issues.

For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v12714).

### Changes since v1.27.14+k3s1:

* Replace deprecated ruby function [(#10089)](https://github.com/k3s-io/k3s/pull/10089)
* Fix bug when using tailscale config by file [(#10143)](https://github.com/k3s-io/k3s/pull/10143)
* Bump flannel version to v0.25.2 [(#10222)](https://github.com/k3s-io/k3s/pull/10222)
* Update kube-router version to v2.1.2 [(#10183)](https://github.com/k3s-io/k3s/pull/10183)
* Improve tailscale test & add extra log in e2e tests [(#10214)](https://github.com/k3s-io/k3s/pull/10214)
* Backports for 2024-06 release cycle [(#10259)](https://github.com/k3s-io/k3s/pull/10259)
    * Add WithSkipMissing to not fail import on missing blobs
    * Use fixed stream server bind address for cri-dockerd
    * Switch stargz over to cri registry config_path
    * Bump to containerd v1.7.17, etcd v3.5.13
    * Bump spegel version
    * Fix issue with externalTrafficPolicy: Local for single-stack services on dual-stack nodes
    * ServiceLB now sets the priorityClassName on svclb pods to `system-node-critical` by default. This can be overridden on a per-service basis via the `svccontroller.k3s.cattle.io/priorityclassname` annotation.
    * Bump minio-go to v7.0.70
    * Bump kine to v0.11.9 to fix pagination
    * Update valid resolv conf
    * Add missing kernel config check
    * Symlinked sub-directories are now respected when scanning Auto-Deploying Manifests (AddOns)
    * Fix bug: allow helm controller to set owner reference
    * Bump klipper-helm image for tls secret support
    * Fix issue with k3s-etcd informers not starting
    * `--enable-pprof` can now be set on agents to enable the debug/pprof endpoints. When set, agents will listen on the supervisor port.
+  * `--supervisor-metrics` can now be set on servers to enable serving internal metrics on the supervisor endpoint; when set, agents will listen on the supervisor port.
+  * Fix netpol crash when node remains tainted uninitialized
+  * The embedded load-balancer will now fall back to trying all servers with health-checks ignored if all servers have been marked unavailable due to failed health checks.
+* More backports for 2024-06 release cycle [(#10290)](https://github.com/k3s-io/k3s/pull/10290)
+* Add snapshot retention etcd-s3-folder fix [(#10314)](https://github.com/k3s-io/k3s/pull/10314)
+* Add test for `isValidResolvConf` (#10302) [(#10332)](https://github.com/k3s-io/k3s/pull/10332)
+* Fix race condition panic in loadbalancer.nextServer [(#10324)](https://github.com/k3s-io/k3s/pull/10324)
+* Fix typo, use `rancher/permissions` [(#10297)](https://github.com/k3s-io/k3s/pull/10297)
+* Update Kubernetes to v1.27.15 [(#10346)](https://github.com/k3s-io/k3s/pull/10346)
+  * Update Kubernetes to v1.27.15
+* Fix agent supervisor port using apiserver port instead [(#10356)](https://github.com/k3s-io/k3s/pull/10356)
+* Fix issue that allowed multiple simultaneous snapshots [(#10378)](https://github.com/k3s-io/k3s/pull/10378)
+
+-----
+## Release [v1.27.14+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.14+k3s1)
+
+
+This release updates Kubernetes to v1.27.14, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v12713).
+
+### Changes since v1.27.13+k3s1:
+
+* Bump E2E opensuse leap to 15.6, fix btrfs test [(#10096)](https://github.com/k3s-io/k3s/pull/10096)
+* Windows changes [(#10113)](https://github.com/k3s-io/k3s/pull/10113)
+* Update to v1.27.14-k3s1 and Go 1.21.9 [(#10103)](https://github.com/k3s-io/k3s/pull/10103)
+
+-----
+## Release [v1.27.13+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.13+k3s1)
+
+
+This release updates Kubernetes to v1.27.13, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v12712).
+
+### Changes since v1.27.12+k3s1:
+
+* Add a new error when kine is used with disable apiserver or disable etcd [(#9803)](https://github.com/k3s-io/k3s/pull/9803)
+* Remove old pinned dependencies [(#9828)](https://github.com/k3s-io/k3s/pull/9828)
+* Transition from deprecated pointer library to ptr [(#9825)](https://github.com/k3s-io/k3s/pull/9825)
+* Golang caching and E2E ubuntu 23.10 [(#9822)](https://github.com/k3s-io/k3s/pull/9822)
+* Add tls for kine [(#9850)](https://github.com/k3s-io/k3s/pull/9850)
+* Bump spegel to v0.0.20-k3s1 [(#9881)](https://github.com/k3s-io/k3s/pull/9881)
+* Backports for 2024-04 release cycle [(#9912)](https://github.com/k3s-io/k3s/pull/9912)
+  * Send error response if member list cannot be retrieved
+  * The k3s stub cloud provider now respects the kubelet's requested provider-id, instance type, and topology labels
+  * Fix error when image has already been pulled
+  * Add /etc/passwd and /etc/group to k3s docker image
+  * Fix etcd snapshot reconcile for agentless servers
+  * Add health-check support to loadbalancer
+  * Add certificate expiry check, events, and metrics
+  * Add workaround for containerd hosts.toml bug when passing config for default registry endpoint
+  * Add supervisor cert/key to rotate list
+  * The embedded containerd has been bumped to v1.7.15
+  * The embedded cri-dockerd has been bumped to v0.3.12
+  * The `k3s etcd-snapshot` command has been reworked for improved consistency. All snapshot operations are now performed by the server process, with the CLI acting as a client to initiate and report results. As a side effect, the CLI is now less noisy when managing snapshots.
+  * Improve etcd load-balancer startup behavior
+  * Actually fix agent certificate rotation
+  * Traefik has been bumped to v2.10.7.
+  * Traefik pod annotations are now set properly in the default chart values.
+  * The system-default-registry value now supports RFC2732 IPv6 literals.
+  * The local-path provisioner now defaults to creating `local` volumes, instead of `hostPath`.
+* Allow LPP to read helper logs [(#9939)](https://github.com/k3s-io/k3s/pull/9939)
+* Update kube-router to v2.1.0 [(#9943)](https://github.com/k3s-io/k3s/pull/9943)
+* Update to v1.27.13-k3s1 and Go 1.21.9 [(#9958)](https://github.com/k3s-io/k3s/pull/9958)
+* Fix on-demand snapshots timing out; not honoring folder [(#9995)](https://github.com/k3s-io/k3s/pull/9995)
+* Make /db/info available anonymously from localhost [(#10003)](https://github.com/k3s-io/k3s/pull/10003)
+
+-----
+## Release [v1.27.12+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.12+k3s1)
+
+
+This release updates Kubernetes to v1.27.12, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v12711).
+
+### Changes since v1.27.11+k3s1:
+
+* Add an integration test for flannel-backend=none [(#9609)](https://github.com/k3s-io/k3s/pull/9609)
+* Install and Unit test backports [(#9642)](https://github.com/k3s-io/k3s/pull/9642)
+* Update klipper-lb image version [(#9606)](https://github.com/k3s-io/k3s/pull/9606)
+* Adjust first node-ip based on configured clusterCIDR [(#9632)](https://github.com/k3s-io/k3s/pull/9632)
+* Improve tailscale e2e test [(#9654)](https://github.com/k3s-io/k3s/pull/9654)
+* Backports for 2024-03 release cycle [(#9670)](https://github.com/k3s-io/k3s/pull/9670)
+  * Fix: use correct wasm shim names
+  * The embedded flannel cni-plugin binary is now built and versioned separately from the rest of the cni plugins and the embedded flannel controller.
+  * Bump spegel to v0.0.18-k3s3
+  * Adds wildcard registry support
+  * Fixes issue with excessive CPU utilization while waiting for containerd to start
+  * Add env var to allow spegel mirroring of latest tag
+  * Tweak netpol node wait logs
+  * Fix coredns NodeHosts on dual-stack clusters
+  * Bump helm-controller/klipper-helm versions
+  * Fix snapshot prune
+  * Fix issue with etcd node name missing hostname
+  * Rootless mode should also bind service nodePort to host for LoadBalancer type, matching UX of rootful mode.
+  * To enable raw output for the `check-config` subcommand, you may now set NO_COLOR=1
+  * Fix additional corner cases in registries handling
+  * Bump metrics-server to v0.7.0
+  * K3s will now warn and suppress duplicate entries in the mirror endpoint list for a registry. Containerd does not support listing the same endpoint multiple times as a mirror for a single upstream registry.
+* Docker and E2E Test Backports [(#9708)](https://github.com/k3s-io/k3s/pull/9708)
+* Fix wildcard entry upstream fallback [(#9734)](https://github.com/k3s-io/k3s/pull/9734)
+* Update to v1.27.12-k3s1 and Go 1.21.8 [(#9745)](https://github.com/k3s-io/k3s/pull/9745)
+
+-----
+## Release [v1.27.11+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.11+k3s1)
+
+
+This release updates Kubernetes to v1.27.11, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v12710).
+
+### Changes since v1.27.10+k3s2:
+
+* Chore: bump Local Path Provisioner version [(#9427)](https://github.com/k3s-io/k3s/pull/9427)
+* Bump cri-dockerd to fix compat with Docker Engine 25 [(#9291)](https://github.com/k3s-io/k3s/pull/9291)
+* Auto Dependency Bump [(#9420)](https://github.com/k3s-io/k3s/pull/9420)
+* Runtimes refactor using exec.LookPath [(#9430)](https://github.com/k3s-io/k3s/pull/9430)
+  * Directories containing runtimes need to be included in the $PATH environment variable for effective runtime detection.
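+
+    As one hedged sketch (the toolkit path is purely illustrative, and agent installs may use the `k3s-agent` unit instead):
+
+    ```bash
+    # Expose a runtime installed under a non-standard prefix to the K3s service.
+    sudo systemctl edit k3s    # opens a drop-in override; add the lines below:
+    # [Service]
+    # Environment="PATH=/usr/local/example-toolkit/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+    sudo systemctl restart k3s
+    ```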
+* Changed how lastHeartBeatTime works in the etcd condition [(#9425)](https://github.com/k3s-io/k3s/pull/9425) +* Allow executors to define containerd and docker behavior [(#9253)](https://github.com/k3s-io/k3s/pull/9253) +* Update Kube-router to v2.0.1 [(#9405)](https://github.com/k3s-io/k3s/pull/9405) +* Backports for 2024-02 release cycle [(#9463)](https://github.com/k3s-io/k3s/pull/9463) +* Bump flannel version + remove multiclustercidr [(#9407)](https://github.com/k3s-io/k3s/pull/9407) +* Enable longer http timeout requests [(#9445)](https://github.com/k3s-io/k3s/pull/9445) +* Test_UnitApplyContainerdQoSClassConfigFileIfPresent [(#9441)](https://github.com/k3s-io/k3s/pull/9441) +* Support PR testing installs [(#9470)](https://github.com/k3s-io/k3s/pull/9470) +* Update Kubernetes to v1.27.11 [(#9491)](https://github.com/k3s-io/k3s/pull/9491) +* Fix drone publish for arm [(#9509)](https://github.com/k3s-io/k3s/pull/9509) +* Remove failing Drone step [(#9515)](https://github.com/k3s-io/k3s/pull/9515) +* Restore original order of agent startup functions [(#9546)](https://github.com/k3s-io/k3s/pull/9546) +* Fix netpol startup when flannel is disabled [(#9579)](https://github.com/k3s-io/k3s/pull/9579) + +----- +## Release [v1.27.10+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.27.10+k3s2) + + +This release updates Kubernetes to v1.27.10, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1279). + +**Important Notes** + +Addresses the runc CVE: [CVE-2024-21626](https://nvd.nist.gov/vuln/detail/CVE-2024-21626) by updating runc to v1.1.12. + +### Changes since v1.27.9+k3s1: + +* Add a retry around updating a secrets-encrypt node annotations [(#9124)](https://github.com/k3s-io/k3s/pull/9124) +* Added support for env *_PROXY variables for agent loadbalancer [(#9117)](https://github.com/k3s-io/k3s/pull/9117) +* Wait for taint to be gone in the node before starting the netpol controller [(#9176)](https://github.com/k3s-io/k3s/pull/9176) +* Etcd condition [(#9182)](https://github.com/k3s-io/k3s/pull/9182) +* Backports for 2024-01 [(#9211)](https://github.com/k3s-io/k3s/pull/9211) +* Move proxy dialer out of init() and fix crash [(#9220)](https://github.com/k3s-io/k3s/pull/9220) +* Pin opa version for missing dependency chain [(#9217)](https://github.com/k3s-io/k3s/pull/9217) +* Etcd node is nil [(#9229)](https://github.com/k3s-io/k3s/pull/9229) +* Update to v1.27.10 and Go 1.20.13 [(#9261)](https://github.com/k3s-io/k3s/pull/9261) +* Use `ipFamilyPolicy: RequireDualStack` for dual-stack kube-dns [(#9270)](https://github.com/k3s-io/k3s/pull/9270) +* Backports for 2024-01 k3s2 [(#9337)](https://github.com/k3s-io/k3s/pull/9337) + * Bump runc to v1.1.12 and helm-controller to v0.15.7 + * Fix handling of bare hostname or IP as endpoint address in registries.yaml +* Bump helm-controller to fix issue with ChartContent [(#9347)](https://github.com/k3s-io/k3s/pull/9347) + +----- +## Release [v1.27.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.9+k3s1) + + +This release updates Kubernetes to v1.27.9, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1278). 
+
+### Changes since v1.27.8+k3s2:
+
+* Bump containerd/runc to v1.7.10-k3s1/v1.1.10 [(#8963)](https://github.com/k3s-io/k3s/pull/8963)
+* Fix overlapping address range [(#9018)](https://github.com/k3s-io/k3s/pull/9018)
+* Runtimes backport [(#9013)](https://github.com/k3s-io/k3s/pull/9013)
+  * Added runtime classes for wasm/nvidia/crun
+  * Added default runtime flag for containerd
+* Bump containerd to v1.7.11 [(#9041)](https://github.com/k3s-io/k3s/pull/9041)
+* Update to v1.27.9-k3s1 [(#9078)](https://github.com/k3s-io/k3s/pull/9078)
+
+-----
+## Release [v1.27.8+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.27.8+k3s2)
+
+
+This release updates Kubernetes to v1.27.8, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1277).
+
+### Changes since v1.27.7+k3s2:
+
+* Etcd status condition [(#8821)](https://github.com/k3s-io/k3s/pull/8821)
+* Add warning for removal of multiclustercidr flag [(#8759)](https://github.com/k3s-io/k3s/pull/8759)
+* Backports for 2023-11 release [(#8878)](https://github.com/k3s-io/k3s/pull/8878)
+  * New timezone info in Docker image allows the use of `spec.timeZone` in CronJobs
+  * Bumped kine to v0.11.0 to resolve issues with postgres and NATS, fix performance of watch channels under heavy load, and improve compatibility with the reference implementation.
+  * Containerd may now be configured to use rdt or blockio configuration by defining `rdt_config.yaml` or `blockio_config.yaml` files.
+  * Add agent flag `disable-apiserver-lb`; when set, the agent will not start the load balancer proxy.
+  * Improved ingress IP ordering from ServiceLB
+  * Disable helm CRD installation for disable-helm-controller
+  * Omit snapshot list configmap entries for snapshots without extra metadata
+  * Add jitter to client config retry to avoid hammering servers when they are starting up
+* Handle nil pointer when runtime core is not ready in etcd [(#8887)](https://github.com/k3s-io/k3s/pull/8887)
+* Improve dualStack log [(#8828)](https://github.com/k3s-io/k3s/pull/8828)
+* Bump dynamiclistener; reduce snapshot controller log spew [(#8902)](https://github.com/k3s-io/k3s/pull/8902)
+  * Bumped dynamiclistener to address a race condition that could cause a server to fail to sync its certificates into the Kubernetes secret
+  * Reduced etcd snapshot log spam during initial cluster startup
+* Remove depends_on for e2e step; fix cert rotate e2e [(#8907)](https://github.com/k3s-io/k3s/pull/8907)
+* Fix etcd snapshot S3 issues [(#8937)](https://github.com/k3s-io/k3s/pull/8937)
+  * Don't apply S3 retention if S3 client failed to initialize
+  * Don't request metadata when listing S3 snapshots
+  * Print key instead of file path in snapshot metadata log message
+* Update to v1.27.8 and Go to 1.20.11 [(#8921)](https://github.com/k3s-io/k3s/pull/8921)
+* Remove s390x [(#8999)](https://github.com/k3s-io/k3s/pull/8999)
+
+-----
+## Release [v1.27.7+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.27.7+k3s2)
+
+
+This release updates Kubernetes to v1.27.7, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1276).
+
+### Changes since v1.27.7+k3s1:
+
+* Fix SystemdCgroup in templates_linux.go [(#8765)](https://github.com/k3s-io/k3s/pull/8765)
+  * Fixed an issue with identifying additional container runtimes
+* Update traefik chart to v25.0.0 [(#8775)](https://github.com/k3s-io/k3s/pull/8775)
+* Update traefik to fix registry value [(#8789)](https://github.com/k3s-io/k3s/pull/8789)
+
+-----
+## Release [v1.27.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.7+k3s1)
+
+
+This release updates Kubernetes to v1.27.7, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1276).
+
+### Changes since v1.27.6+k3s1:
+
+* Fix error reporting [(#8411)](https://github.com/k3s-io/k3s/pull/8411)
+* Add context to flannel errors [(#8419)](https://github.com/k3s-io/k3s/pull/8419)
+* Include the interface name in the error message [(#8435)](https://github.com/k3s-io/k3s/pull/8435)
+* Update kube-router [(#8443)](https://github.com/k3s-io/k3s/pull/8443)
+* Add extraArgs to tailscale [(#8464)](https://github.com/k3s-io/k3s/pull/8464)
+* Added error when cluster reset while using server flag [(#8455)](https://github.com/k3s-io/k3s/pull/8455)
+  * The user will receive an error when using `--cluster-reset` with the `--server` flag
+* Cluster reset from non-bootstrap nodes [(#8451)](https://github.com/k3s-io/k3s/pull/8451)
+* Take IPFamily precedence based on order [(#8504)](https://github.com/k3s-io/k3s/pull/8504)
+* Fix spellcheck problem [(#8509)](https://github.com/k3s-io/k3s/pull/8509)
+* Network defaults are duplicated, remove one [(#8551)](https://github.com/k3s-io/k3s/pull/8551)
+* Advertise address integration test [(#8516)](https://github.com/k3s-io/k3s/pull/8516)
+* System agent push tags fix [(#8569)](https://github.com/k3s-io/k3s/pull/8569)
+* Fixed tailscale node IP dualstack mode in case of IPv4 only node [(#8558)](https://github.com/k3s-io/k3s/pull/8558)
+* Server Token Rotation [(#8576)](https://github.com/k3s-io/k3s/pull/8576)
+  * Users can now rotate the server token using `k3s token rotate -t <old-token> --new-token <new-token>`. After the command succeeds, all server nodes must be restarted with the new token.
+* E2E Domain Drone Cleanup [(#8582)](https://github.com/k3s-io/k3s/pull/8582)
+* Clear remove annotations on cluster reset [(#8587)](https://github.com/k3s-io/k3s/pull/8587)
+  * Fixed an issue that could cause k3s to attempt to remove members from the etcd cluster immediately following a cluster-reset/restore, if they were queued for removal at the time the snapshot was taken.
+* Use IPv6 in case it is the first configured IP with dualstack [(#8597)](https://github.com/k3s-io/k3s/pull/8597)
+* Backports for 2023-10 release [(#8615)](https://github.com/k3s-io/k3s/pull/8615)
+* Update kube-router package in build script [(#8634)](https://github.com/k3s-io/k3s/pull/8634)
+* Add etcd-only/control-plane-only server test and fix control-plane-only server crash [(#8642)](https://github.com/k3s-io/k3s/pull/8642)
+* Use `version.Program` not K3s in token rotate logs [(#8656)](https://github.com/k3s-io/k3s/pull/8656)
+* Windows agent support [(#8650)](https://github.com/k3s-io/k3s/pull/8650)
+* Fix CloudDualStackNodeIPs feature-gate inconsistency [(#8669)](https://github.com/k3s-io/k3s/pull/8669)
+* Add --image-service-endpoint flag (#8279) [(#8662)](https://github.com/k3s-io/k3s/pull/8662)
+  * Add `--image-service-endpoint` flag to specify an external image service socket.
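+
+    A minimal sketch of supplying the flag via the config file (the socket path is hypothetical):
+
+    ```yaml
+    # /etc/rancher/k3s/config.yaml
+    image-service-endpoint: unix:///run/example/image-service.sock
+    ```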
+* Backport etcd fixes [(#8690)](https://github.com/k3s-io/k3s/pull/8690)
+  * Re-enable etcd endpoint auto-sync
+  * Manually requeue configmap reconcile when no nodes have reconciled snapshots
+* Update to v1.27.7 and Go to v1.20.10 [(#8681)](https://github.com/k3s-io/k3s/pull/8681)
+* Fix s3 snapshot restore [(#8733)](https://github.com/k3s-io/k3s/pull/8733)
+
+-----
+## Release [v1.27.6+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.6+k3s1)
+
+
+This release updates Kubernetes to v1.27.6, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1275).
+
+### Changes since v1.27.5+k3s1:
+
+* Bump kine to v0.10.3 [(#8324)](https://github.com/k3s-io/k3s/pull/8324)
+* Update to v1.27.6 and Go to 1.20.8 [(#8356)](https://github.com/k3s-io/k3s/pull/8356)
+  * Bump embedded containerd to v1.7.6
+  * Bump embedded stargz-snapshotter plugin to latest
+  * Fixed intermittent drone CI failures due to race conditions in test environment setup scripts
+  * Fixed CI failures due to api discovery changes in Kubernetes 1.28
+
+-----
+## Release [v1.27.5+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.5+k3s1)
+
+This release updates Kubernetes to v1.27.5, and fixes a number of issues.
+
+:::warning Important
+This release includes support for remediating CVE-2023-32187, a potential Denial of Service attack vector on K3s servers. See https://github.com/k3s-io/k3s/security/advisories/GHSA-m4hf-6vgr-75r2 for more information, including mandatory steps necessary to harden clusters against this vulnerability.
+:::
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1274).
+
+### Changes since v1.27.4+k3s1:
+
+* Update cni plugins version to v1.3.0 [(#8056)](https://github.com/k3s-io/k3s/pull/8056)
+  * Upgraded cni-plugins to v1.3.0
+* Update flannel to v0.22.1 [(#8057)](https://github.com/k3s-io/k3s/pull/8057)
+  * Update flannel to v0.22.1
+* ADR on secrets encryption v3 [(#7938)](https://github.com/k3s-io/k3s/pull/7938)
+* Unit test for MustFindString [(#8013)](https://github.com/k3s-io/k3s/pull/8013)
+* Add support for using base template in etc/containerd/config.toml.tmpl [(#7991)](https://github.com/k3s-io/k3s/pull/7991)
+  * User-provided containerd config templates may now use `{{ template "base" . }}` to include the default K3s template content. This makes it easier to maintain user configuration if the only need is to add additional sections to the file. A short sketch follows below.
+* Make apiserver egress args conditional on egress-selector-mode [(#7972)](https://github.com/k3s-io/k3s/pull/7972)
+  * K3s no longer enables the apiserver's `enable-aggregator-routing` flag when the egress proxy is not being used to route connections to in-cluster endpoints.
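+
+  Picking up the base-template note from (#7991) above: a minimal sketch of a user-provided template, assuming the conventional K3s template location (the trailing comment marks where extra TOML sections would go):
+
+  ```toml
+  # /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
+  {{ template "base" . }}
+
+  # ...additional user-supplied TOML sections go here...
+  ```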
+* Security bump to `docker/distribution` [(#8047)](https://github.com/k3s-io/k3s/pull/8047) +* Fix coreos multiple installs [(#8083)](https://github.com/k3s-io/k3s/pull/8083) +* Update stable channel to v1.27.4+k3s1 [(#8067)](https://github.com/k3s-io/k3s/pull/8067) +* Fix tailscale bug with ip modes [(#8077)](https://github.com/k3s-io/k3s/pull/8077) +* Consolidate CopyFile functions [(#8079)](https://github.com/k3s-io/k3s/pull/8079) +* E2E: Support GOCOVER for more tests + fixes [(#8080)](https://github.com/k3s-io/k3s/pull/8080) +* Fix typo in terraform/README.md [(#8090)](https://github.com/k3s-io/k3s/pull/8090) +* Add FilterCN function to prevent SAN Stuffing [(#8085)](https://github.com/k3s-io/k3s/pull/8085) + * K3s's external apiserver listener now declines to add to its certificate any subject names not associated with the kubernetes apiserver service, server nodes, or values of the --tls-san option. This prevents the certificate's SAN list from being filled with unwanted entries. +* Bump docker/docker to master commit; cri-dockerd to 0.3.4 [(#8092)](https://github.com/k3s-io/k3s/pull/8092) + * Bump docker/docker module version to fix issues with cri-dockerd caused by recent releases of golang rejecting invalid host headers sent by the docker client. +* Bump versions for etcd, containerd, runc [(#8109)](https://github.com/k3s-io/k3s/pull/8109) + * Updated the embedded containerd to v1.7.3+k3s1 + * Updated the embedded runc to v1.1.8 + * Updated the embedded etcd to v3.5.9+k3s1 +* Etcd snapshots retention when node name changes [(#8099)](https://github.com/k3s-io/k3s/pull/8099) +* Bump kine to v0.10.2 [(#8125)](https://github.com/k3s-io/k3s/pull/8125) + * Updated kine to v0.10.2 +* Remove terraform package [(#8136)](https://github.com/k3s-io/k3s/pull/8136) +* Fix etcd-snapshot delete when etcd-s3 is true [(#8110)](https://github.com/k3s-io/k3s/pull/8110) +* Add --disable-cloud-controller and --disable-kube-proxy test [(#8018)](https://github.com/k3s-io/k3s/pull/8018) +* Use `go list -m` instead of grep to look up versions [(#8138)](https://github.com/k3s-io/k3s/pull/8138) +* Use VERSION_K8S in tests instead of grep go.mod [(#8147)](https://github.com/k3s-io/k3s/pull/8147) +* Fix for Kubeflag Integration test [(#8154)](https://github.com/k3s-io/k3s/pull/8154) +* Fix for cluster-reset backup from s3 when etcd snapshots are disabled [(#8155)](https://github.com/k3s-io/k3s/pull/8155) +* Run integration test CI in parallel [(#8156)](https://github.com/k3s-io/k3s/pull/8156) +* Bump Trivy version [(#8150)](https://github.com/k3s-io/k3s/pull/8150) +* Bump Trivy version [(#8178)](https://github.com/k3s-io/k3s/pull/8178) +* Fixed the etcd retention to delete orphaned snapshots based on the date [(#8177)](https://github.com/k3s-io/k3s/pull/8177) +* Bump dynamiclistener [(#8193)](https://github.com/k3s-io/k3s/pull/8193) + * Bumped dynamiclistener to address an issue that could cause the apiserver/supervisor listener on 6443 to stop serving requests on etcd-only nodes. + * The K3s external apiserver/supervisor listener on 6443 now sends a complete certificate chain in the TLS handshake. 
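+
+    One way to spot-check the chain that is served (replace `<server>` with a server node's address; assumes `openssl` is available):
+
+    ```bash
+    # Count the certificates presented on the supervisor/apiserver port:
+    openssl s_client -connect <server>:6443 -showcerts </dev/null 2>/dev/null \
+      | grep -c 'BEGIN CERTIFICATE'
+    ```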
+* Bump helm-controller/klipper-helm versions [(#8204)](https://github.com/k3s-io/k3s/pull/8204)
+  * The version of `helm` used by the bundled helm controller's job image has been updated to v3.12.3
+* E2E: Add test for `k3s token` [(#8184)](https://github.com/k3s-io/k3s/pull/8184)
+* Move flannel to 0.22.2 [(#8219)](https://github.com/k3s-io/k3s/pull/8219)
+  * Move flannel to v0.22.2
+* Update to v1.27.5 [(#8236)](https://github.com/k3s-io/k3s/pull/8236)
+* Add new CLI flag to enable TLS SAN CN filtering [(#8257)](https://github.com/k3s-io/k3s/pull/8257)
+  * Added a new `--tls-san-security` option. This flag defaults to false, but can be set to true to disable automatically adding SANs to the server's TLS certificate to satisfy any hostname requested by a client.
+* Add RWMutex to address controller [(#8273)](https://github.com/k3s-io/k3s/pull/8273)
+
+-----
+## Release [v1.27.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.4+k3s1)
+
+This release updates Kubernetes to v1.27.4, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1273).
+
+### Changes since v1.27.3+k3s1:
+
+* Pkg imported more than once [(#7803)](https://github.com/k3s-io/k3s/pull/7803)
+* Faster K3s Binary Build Option [(#7805)](https://github.com/k3s-io/k3s/pull/7805)
+* Update stable channel to v1.27.3+k3s1 [(#7827)](https://github.com/k3s-io/k3s/pull/7827)
+* Adding cli to custom klipper helm image [(#7682)](https://github.com/k3s-io/k3s/pull/7682)
+  * The default helm-controller job image can now be overridden with the --helm-job-image CLI flag
+* Check if we are on ipv4, ipv6 or dualStack when doing tailscale [(#7838)](https://github.com/k3s-io/k3s/pull/7838)
+* Remove file_windows.go [(#7845)](https://github.com/k3s-io/k3s/pull/7845)
+* Add a k3s data directory location specified by the cli [(#7791)](https://github.com/k3s-io/k3s/pull/7791)
+* Fix e2e startup flaky test [(#7839)](https://github.com/k3s-io/k3s/pull/7839)
+* Allow k3s to customize apiServerPort on helm-controller [(#7834)](https://github.com/k3s-io/k3s/pull/7834)
+* Fall back to basic/bearer auth when node identity auth is rejected [(#7836)](https://github.com/k3s-io/k3s/pull/7836)
+  * Resolved an issue that caused agents joined with kubeadm-style bootstrap tokens to fail to rejoin the cluster when their node object is deleted.
+* Fix code spell check [(#7858)](https://github.com/k3s-io/k3s/pull/7858)
+* Add e2e s3 test [(#7833)](https://github.com/k3s-io/k3s/pull/7833)
+* Warn that v1.28 will deprecate reencrypt/prepare [(#7848)](https://github.com/k3s-io/k3s/pull/7848)
+* Support setting control server URL for Tailscale [(#7807)](https://github.com/k3s-io/k3s/pull/7807)
+  * Support connecting tailscale to a separate server (e.g. headscale)
+* Improve for K3s release Docs [(#7864)](https://github.com/k3s-io/k3s/pull/7864)
+* Fix rootless node password location [(#7887)](https://github.com/k3s-io/k3s/pull/7887)
+* Bump google.golang.org/grpc from 1.51.0 to 1.53.0 in /tests/terraform [(#7879)](https://github.com/k3s-io/k3s/pull/7879)
+* Add retry for clone step [(#7862)](https://github.com/k3s-io/k3s/pull/7862)
+* Generation of certificates and keys for etcd is gated if etcd is disabled. [(#6998)](https://github.com/k3s-io/k3s/pull/6998)
+* Don't use zgrep in `check-config` if apparmor profile is enforced [(#7939)](https://github.com/k3s-io/k3s/pull/7939)
+* Fix image_scan.sh script and download trivy version [(#7950)](https://github.com/k3s-io/k3s/pull/7950)
+* Revert "Warn that v1.28 will deprecate reencrypt/prepare" [(#7977)](https://github.com/k3s-io/k3s/pull/7977)
+* Adjust default kubeconfig file permissions [(#7978)](https://github.com/k3s-io/k3s/pull/7978)
+* Fix update go version command on release documentation [(#8028)](https://github.com/k3s-io/k3s/pull/8028)
+* Update to v1.27.4 [(#8014)](https://github.com/k3s-io/k3s/pull/8014)
+
+-----
+## Release [v1.27.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.3+k3s1)
+
+This release updates Kubernetes to v1.27.3, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1272).
+
+### Changes since v1.27.2+k3s1:
+
+* Update flannel version [(#7628)](https://github.com/k3s-io/k3s/pull/7628)
+  * Update flannel to v0.22.0
+* Add el9 selinux rpm [(#7635)](https://github.com/k3s-io/k3s/pull/7635)
+* Update channels [(#7634)](https://github.com/k3s-io/k3s/pull/7634)
+* Allow coredns override extensions [(#7583)](https://github.com/k3s-io/k3s/pull/7583)
+  * The `coredns-custom` ConfigMap now allows for `*.override` sections to be included in the `.:53` default server block (see the sketch below).
+* Bump klipper-lb to v0.4.4 [(#7617)](https://github.com/k3s-io/k3s/pull/7617)
+  * Bumped klipper-lb image to v0.4.4 to resolve an issue that prevented access to ServiceLB ports from localhost when the Service ExternalTrafficPolicy was set to Local.
+* Bump metrics-server to v0.6.3 and update tls-cipher-suites [(#7564)](https://github.com/k3s-io/k3s/pull/7564)
+  * The bundled metrics-server has been bumped to v0.6.3, and now uses only secure TLS ciphers by default.
+* Do not use the admin kubeconfig for the supervisor and core controllers [(#7616)](https://github.com/k3s-io/k3s/pull/7616)
+  * The K3s core controllers (supervisor, deploy, and helm) no longer use the admin kubeconfig. This makes it easier to determine from access and audit logs which actions are performed by the system, and which are performed by an administrative user.
+* Bump golang:alpine image version [(#7619)](https://github.com/k3s-io/k3s/pull/7619)
+* Make LB image configurable when compiling k3s [(#7626)](https://github.com/k3s-io/k3s/pull/7626)
+* Bump vagrant libvirt with fix for plugin installs [(#7605)](https://github.com/k3s-io/k3s/pull/7605)
+* Add format command on Makefile [(#7437)](https://github.com/k3s-io/k3s/pull/7437)
+* Use el8 rpm for fedora 38 and 39 [(#7664)](https://github.com/k3s-io/k3s/pull/7664)
+* Check variant before version to decide rpm target and packager (closes #7666) [(#7667)](https://github.com/k3s-io/k3s/pull/7667)
+* Test Coverage Reports for E2E tests [(#7526)](https://github.com/k3s-io/k3s/pull/7526)
+* Soft-fail on node password verification if the secret cannot be created [(#7655)](https://github.com/k3s-io/k3s/pull/7655)
+  * K3s now allows nodes to join the cluster even if the node password secret cannot be created at the time the node joins. The secret creation will be retried in the background. This resolves a potential deadlock created by fail-closed validating webhooks that block secret creation, where the webhook is unavailable until new nodes join the cluster to run the webhook pod.
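+
+    Returning to the `coredns-custom` change from (#7583) above, a minimal sketch (the `log` directive is illustrative; any plugin configuration valid in the default `.:53` block could appear):
+
+    ```yaml
+    apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      name: coredns-custom
+      namespace: kube-system
+    data:
+      example.override: |
+        log   # enable query logging inside the default .:53 server block
+    ```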
+* Enable containerd aufs/devmapper/zfs snapshotter plugins [(#7661)](https://github.com/k3s-io/k3s/pull/7661)
+  * The bundled containerd's aufs/devmapper/zfs snapshotter plugins have been restored. These were unintentionally omitted when moving containerd back into the k3s multicall binary in the previous release.
+* Bump docker go.mod [(#7681)](https://github.com/k3s-io/k3s/pull/7681)
+* Shortcircuit commands with version or help flags [(#7683)](https://github.com/k3s-io/k3s/pull/7683)
+  * Non-root users can now call `k3s --help` and `k3s --version` commands without running into permission errors over the default config file.
+* Bump Trivy version [(#7672)](https://github.com/k3s-io/k3s/pull/7672)
+* E2E: Capture coverage of K3s subcommands [(#7686)](https://github.com/k3s-io/k3s/pull/7686)
+* Integrate tailscale into k3s [(#7352)](https://github.com/k3s-io/k3s/pull/7352)
+  * Integration of tailscale VPN into k3s
+* Add private registry e2e test [(#7653)](https://github.com/k3s-io/k3s/pull/7653)
+* E2E: Remove unnecessary daemonset addition/deletion [(#7696)](https://github.com/k3s-io/k3s/pull/7696)
+* Add issue template for OS validation [(#7695)](https://github.com/k3s-io/k3s/pull/7695)
+* Fix spelling check [(#7740)](https://github.com/k3s-io/k3s/pull/7740)
+* Remove useless libvirt config [(#7745)](https://github.com/k3s-io/k3s/pull/7745)
+* Bump helm-controller to v0.15.0 for create-namespace support [(#7716)](https://github.com/k3s-io/k3s/pull/7716)
+  * The embedded helm controller has been bumped to v0.15.0, and now supports creating the chart's target namespace if it does not exist.
+* Fix error logging in tailscale [(#7776)](https://github.com/k3s-io/k3s/pull/7776)
+* Add commands to remove advertised routes of tailscale in k3s-killall.sh [(#7777)](https://github.com/k3s-io/k3s/pull/7777)
+* Update Kubernetes to v1.27.3 [(#7790)](https://github.com/k3s-io/k3s/pull/7790)
+
+-----
+## Release [v1.27.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.2+k3s1)
+
+This release updates Kubernetes to v1.27.2, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1271).
+
+### Changes since v1.27.1+k3s1:
+
+* Ensure that klog verbosity is set to the same level as logrus [(#7303)](https://github.com/k3s-io/k3s/pull/7303)
+* Create CRDs with schema [(#7308)](https://github.com/k3s-io/k3s/pull/7308)
+  * Fixed an issue where Addon, HelmChart, and HelmChartConfig CRDs were created without structural schema, allowing the creation of custom resources of these types with invalid content.
+* Bump k3s-root for aarch64 page size fix [(#7364)](https://github.com/k3s-io/k3s/pull/7364)
+  * K3s once again supports aarch64 nodes with page size > 4k
+* Bump Runc and Containerd [(#7339)](https://github.com/k3s-io/k3s/pull/7339)
+* Add integration tests for etcd-snapshot server flags and refactor /tests/integration/integration.go/K3sStartServer [(#7300)](https://github.com/k3s-io/k3s/pull/7300)
+* Bump traefik to v2.9.10 / chart 21.2.0 [(#7324)](https://github.com/k3s-io/k3s/pull/7324)
+  * The packaged Traefik version has been bumped to v2.9.10 / chart 21.2.0
+* Add longhorn storage test [(#6445)](https://github.com/k3s-io/k3s/pull/6445)
+* Improve error message when CLI wrapper Exec fails [(#7373)](https://github.com/k3s-io/k3s/pull/7373)
+  * K3s now prints a more meaningful error when attempting to run from a filesystem mounted `noexec`.
+* Fix issues with `--disable-agent` and `--egress-selector-mode=pod|cluster` [(#7331)](https://github.com/k3s-io/k3s/pull/7331)
+  * Servers started with the (experimental) --disable-agent flag no longer attempt to run the tunnel authorizer agent component.
+  * Fixed a regression that prevented the pod and cluster egress-selector modes from working properly.
+* Retry cluster join on "too many learners" error [(#7351)](https://github.com/k3s-io/k3s/pull/7351)
+  * K3s now retries the cluster join operation when receiving a "too many learners" error from etcd. This most frequently occurred when attempting to add multiple servers at the same time.
+* Fix MemberList error handling and incorrect etcd-arg passthrough [(#7371)](https://github.com/k3s-io/k3s/pull/7371)
+  * K3s now correctly passes through etcd-args to the temporary etcd that is used to extract cluster bootstrap data when restarting managed etcd nodes.
+  * K3s now properly handles errors obtaining the current etcd cluster member list when a new server is joining the managed etcd cluster.
+* Bump Trivy version [(#7383)](https://github.com/k3s-io/k3s/pull/7383)
+* Handle multiple arguments with StringSlice flags [(#7380)](https://github.com/k3s-io/k3s/pull/7380)
+* Add v1.27 channel [(#7387)](https://github.com/k3s-io/k3s/pull/7387)
+* Enable FindString to search dotD config files [(#7323)](https://github.com/k3s-io/k3s/pull/7323)
+* Migrate netutil methods into /util/net.go [(#7422)](https://github.com/k3s-io/k3s/pull/7422)
+* Local-storage: Fix permission [(#7217)](https://github.com/k3s-io/k3s/pull/7217)
+* Bump cni plugins to v1.2.0-k3s1 [(#7425)](https://github.com/k3s-io/k3s/pull/7425)
+  * The bundled CNI plugins have been upgraded to v1.2.0-k3s1. The bandwidth and firewall plugins are now included in the bundle.
+* Add dependabot label and reviewer [(#7423)](https://github.com/k3s-io/k3s/pull/7423)
+* E2E: Startup test cleanup + RunCommand Enhancement [(#7388)](https://github.com/k3s-io/k3s/pull/7388)
+* Fail to validate server tokens that use bootstrap id/secret format [(#7389)](https://github.com/k3s-io/k3s/pull/7389)
+  * K3s now exits with a proper error message when the server token uses a bootstrap token `id.secret` format.
+* Fix token startup test [(#7442)](https://github.com/k3s-io/k3s/pull/7442)
+* Bump kine to v0.10.1 [(#7414)](https://github.com/k3s-io/k3s/pull/7414)
+  * The embedded kine version has been bumped to v0.10.1. This replaces the legacy `lib/pq` postgres driver with `pgx`.
+* Add kube-* server flags integration tests [(#7416)](https://github.com/k3s-io/k3s/pull/7416)
+* Add support for `-cover` + integration test code coverage [(#7415)](https://github.com/k3s-io/k3s/pull/7415)
+* Bump kube-router version to fix a bug when a port name is used [(#7454)](https://github.com/k3s-io/k3s/pull/7454)
+* Consistently use constant-time comparison of password hashes instead of bare password strings [(#7455)](https://github.com/k3s-io/k3s/pull/7455)
+* Bump containerd to v1.7.0 and move back into multicall binary [(#7418)](https://github.com/k3s-io/k3s/pull/7418)
+  * The embedded containerd version has been bumped to `v1.7.0-k3s1`, and has been reintegrated into the main k3s binary for a significant savings in release artifact size.
+* Adding PITS and Getdeck Beiboot as adopters thanks to Schille and Miw… [(#7524)](https://github.com/k3s-io/k3s/pull/7524)
+* Bump helm-controller version for repo auth/ca support [(#7525)](https://github.com/k3s-io/k3s/pull/7525)
+  * The embedded Helm controller now supports authenticating to chart repositories via credentials stored in a Secret, as well as passing repo CAs via ConfigMap.
+* Bump containerd/runc to v1.7.1-k3s1/v1.1.7 [(#7533)](https://github.com/k3s-io/k3s/pull/7533)
+  * The bundled containerd and runc versions have been bumped to v1.7.1-k3s1/v1.1.7
+* Wrap error stating that it is coming from netpol [(#7539)](https://github.com/k3s-io/k3s/pull/7539)
+* Add Rotation certification Check, remove func to restart agents [(#7097)](https://github.com/k3s-io/k3s/pull/7097)
+* Bump alpine from 3.17 to 3.18 in /package [(#7550)](https://github.com/k3s-io/k3s/pull/7550)
+* Bump alpine from 3.17 to 3.18 in /conformance [(#7551)](https://github.com/k3s-io/k3s/pull/7551)
+* Add '-all' flag to apply to inactive systemd units [(#7567)](https://github.com/k3s-io/k3s/pull/7567)
+* Update to v1.27.2-k3s1 [(#7575)](https://github.com/k3s-io/k3s/pull/7575)
+* Fix iptables rules clean during upgrade [(#7591)](https://github.com/k3s-io/k3s/pull/7591)
+* Pin emicklei/go-restful to v3.9.0 [(#7597)](https://github.com/k3s-io/k3s/pull/7597)
+* Add el9 selinux rpm [(#7443)](https://github.com/k3s-io/k3s/pull/7443)
+* Revert "Add el9 selinux rpm (#7443)" [(#7608)](https://github.com/k3s-io/k3s/pull/7608)
+
+-----
+## Release [v1.27.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.1+k3s1)
+
+This release is K3s's first in the v1.27 line. This release updates Kubernetes to v1.27.1.
+
+Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#urgent-upgrade-notes).
+
+### Changes since v1.26.4+k3s1:
+
+* Kubernetes 1.27.1 [(#7271)](https://github.com/k3s-io/k3s/pull/7271)
+* V1.27.1 CLI Deprecation [(#7311)](https://github.com/k3s-io/k3s/pull/7311)
+  * `--flannel-backend=wireguard` has been completely replaced with `--flannel-backend=wireguard-native`
+  * The `k3s etcd-snapshot` command will now print a help message; to save a snapshot, use `k3s etcd-snapshot save`
+  * The following flags will now cause fatal errors (with full removal coming in v1.28.0):
+    * `--flannel-backend=ipsec`: replaced with `--flannel-backend=wireguard-native` [see docs for more info.](https://docs.k3s.io/installation/network-options#migrating-from-wireguard-or-ipsec-to-wireguard-native) (a sketch follows below)
+    * Supplying multiple `--flannel-backend` values is no longer valid. Use `--flannel-conf` instead.
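+
+      For the wireguard/ipsec migration called out above, a hedged sketch of the server-side replacement (standard config file location assumed):
+
+      ```yaml
+      # /etc/rancher/k3s/config.yaml (server)
+      flannel-backend: wireguard-native
+      ```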
+* Changed command -v redirection for iptables bin check [(#7315)](https://github.com/k3s-io/k3s/pull/7315) +* Update channel server for april 2023 [(#7327)](https://github.com/k3s-io/k3s/pull/7327) +* Bump cri-dockerd [(#7347)](https://github.com/k3s-io/k3s/pull/7347) +* Cleanup help messages [(#7369)](https://github.com/k3s-io/k3s/pull/7369) + +----- diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.28.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.28.X.md new file mode 100644 index 000000000..f181063fc --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.28.X.md @@ -0,0 +1,529 @@ +--- +hide_table_of_contents: true +sidebar_position: 5 +--- + +# v1.28.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#urgent-upgrade-notes). +::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.28.15+k3s1](v1.28.X.md#release-v12815k3s1) | Oct 26 2024| [v1.28.15](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12815) | [v0.13.2](https://github.com/k3s-io/kine/releases/tag/v0.13.2) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.22-k3s1.28](https://github.com/k3s-io/containerd/releases/tag/v1.7.22-k3s1.28) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.15.15](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.15) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.28.14+k3s1](v1.28.X.md#release-v12814k3s1) | Sep 19 2024| [v1.28.14](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12814) | [v0.12.0](https://github.com/k3s-io/kine/releases/tag/v0.12.0) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.21-k3s2.28](https://github.com/k3s-io/containerd/releases/tag/v1.7.21-k3s2.28) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.8](https://github.com/traefik/traefik/releases/tag/v2.11.8) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.15.13](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.13) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.28.13+k3s1](v1.28.X.md#release-v12813k3s1) | Aug 21 2024| [v1.28.13](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12813) | [v0.11.11](https://github.com/k3s-io/kine/releases/tag/v0.11.11) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | 
[v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.20-k3s2.28](https://github.com/k3s-io/containerd/releases/tag/v1.7.20-k3s2.28) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.28.12+k3s1](v1.28.X.md#release-v12812k3s1) | Jul 31 2024| [v1.28.12](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12812) | [v0.11.11](https://github.com/k3s-io/kine/releases/tag/v0.11.11) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1.28](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1.28) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.28.11+k3s2](v1.28.X.md#release-v12811k3s2) | Jul 03 2024| [v1.28.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12811) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1.28](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1.28) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) | +| [v1.28.11+k3s1](v1.28.X.md#release-v12811k3s1) | Jun 25 2024| [v1.28.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12811) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1.28](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1.28) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.2](https://github.com/flannel-io/flannel/releases/tag/v0.25.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | 
[v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) | +| [v1.28.10+k3s1](v1.28.X.md#release-v12810k3s1) | May 22 2024| [v1.28.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12810) | [v0.11.7](https://github.com/k3s-io/kine/releases/tag/v0.11.7) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.28.9+k3s1](v1.28.X.md#release-v1289k3s1) | Apr 25 2024| [v1.28.9](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1289) | [v0.11.7](https://github.com/k3s-io/kine/releases/tag/v0.11.7) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.28.8+k3s1](v1.28.X.md#release-v1288k3s1) | Mar 25 2024| [v1.28.8](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1288) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.28.7+k3s1](v1.28.X.md#release-v1287k3s1) | Feb 29 2024| [v1.28.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1287) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.12-k3s1](https://github.com/k3s-io/runc/releases/tag/v1.1.12-k3s1) | 
[v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.28.6+k3s2](v1.28.X.md#release-v1286k3s2) | Feb 06 2024| [v1.28.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1286) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.28.5+k3s1](v1.28.X.md#release-v1285k3s1) | Dec 27 2023| [v1.28.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1285) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.10](https://github.com/opencontainers/runc/releases/tag/v1.1.10) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.28.4+k3s2](v1.28.X.md#release-v1284k3s2) | Dec 06 2023| [v1.28.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1284) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.28.3+k3s2](v1.28.X.md#release-v1283k3s2) | Nov 08 2023| [v1.28.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1283) | 
[v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.28.3+k3s1](v1.28.X.md#release-v1283k3s1) | Oct 30 2023| [v1.28.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1283) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.7-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.7-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.28.2+k3s1](v1.28.X.md#release-v1282k3s1) | Sep 20 2023| [v1.28.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1282) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.6-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.6-k3s1) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.28.1+k3s1](v1.28.X.md#release-v1281k3s1) | Sep 08 2023| [v1.28.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1281) | [v0.10.3](https://github.com/k3s-io/kine/releases/tag/v0.10.3) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.3-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.3-k3s2) | [v1.1.8](https://github.com/opencontainers/runc/releases/tag/v1.1.8) | [v0.22.2](https://github.com/flannel-io/flannel/releases/tag/v0.22.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.9.10](https://github.com/traefik/traefik/releases/tag/v2.9.10) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | 
[v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | + +
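+The component versions in the table above can be confirmed directly on a running node. A minimal sketch, assuming a standard systemd-managed install:
+
+```bash
+# k3s binary and embedded Go toolchain versions
+k3s --version
+
+# containerd version as reported over the CRI socket
+k3s crictl version
+
+# kubelet and container runtime versions reported per node
+kubectl get nodes -o wide
+```
+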
+ +## Release [v1.28.15+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.15+k3s1) + + +This release updates Kubernetes to v1.28.15, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v12814). + +### Changes since v1.28.14+k3s1: + +* Add int test for flannel-ipv6masq [(#10906)](https://github.com/k3s-io/k3s/pull/10906) +* Bump Wharfie to v0.6.7 [(#10977)](https://github.com/k3s-io/k3s/pull/10977) +* Add user path to runtimes search [(#11005)](https://github.com/k3s-io/k3s/pull/11005) +* Add e2e test for advanced fields in services [(#11020)](https://github.com/k3s-io/k3s/pull/11020) +* Launch private registry with init [(#11045)](https://github.com/k3s-io/k3s/pull/11045) +* Backports for 2024-10 [(#11063)](https://github.com/k3s-io/k3s/pull/11063) +* Allow additional Rootless CopyUpDirs through K3S_ROOTLESS_COPYUPDIRS [(#11042)](https://github.com/k3s-io/k3s/pull/11042) +* Bump containerd to v1.7.22 [(#11075)](https://github.com/k3s-io/k3s/pull/11075) +* Add the nvidia runtime cdi [(#11095)](https://github.com/k3s-io/k3s/pull/11095) +* Simplify svclb ds [(#11085)](https://github.com/k3s-io/k3s/pull/11085) +* Revert "Make svclb as simple as possible" [(#11115)](https://github.com/k3s-io/k3s/pull/11115) +* Fixes "file exists" error from CNI bins when upgrading k3s [(#11128)](https://github.com/k3s-io/k3s/pull/11128) +* Update to Kubernetes v1.28.15-k3s1 and Go 1.22.8 [(#11161)](https://github.com/k3s-io/k3s/pull/11161) + +----- +## Release [v1.28.14+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.14+k3s1) + + +This release updates Kubernetes to v1.28.14, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v12813). + +### Changes since v1.28.13+k3s1: + +* Testing Backports for 2024-09 [(#10804)](https://github.com/k3s-io/k3s/pull/10804) + * Update to newer OS images for install testing + * Fix caching name for e2e vagrant box + * Fix deploy latest commit on E2E tests + * DRY E2E Upgrade test setup + * Cover edge case when on new minor release for E2E upgrade test +* Update CNI plugins version [(#10820)](https://github.com/k3s-io/k3s/pull/10820) +* Backports for 2024-09 [(#10845)](https://github.com/k3s-io/k3s/pull/10845) +* Fix hosts.toml header var [(#10874)](https://github.com/k3s-io/k3s/pull/10874) +* Update to v1.28.14-k3s1 and Go 1.22.6 [(#10884)](https://github.com/k3s-io/k3s/pull/10884) +* Update Kubernetes to v1.28.14-k3s2 [(#10907)](https://github.com/k3s-io/k3s/pull/10907) + +----- +## Release [v1.28.13+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.13+k3s1) + + +This release updates Kubernetes to v1.28.13, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v12812). + +### Changes since v1.28.12+k3s1: + +* Fixing setproctitle function [(#10624)](https://github.com/k3s-io/k3s/pull/10624) +* Bump docker/docker to v24.0.10-0.20240723193628-852759a7df45 [(#10651)](https://github.com/k3s-io/k3s/pull/10651) +* Backports for 2024-08 release cycle [(#10666)](https://github.com/k3s-io/k3s/pull/10666) + * Use pagination when listing large numbers of resources + * Fix multiple issues with servicelb + * Remove deprecated use of wait. 
functions + * Wire lasso metrics up to metrics endpoint +* Backports for August 2024 [(#10673)](https://github.com/k3s-io/k3s/pull/10673) +* Bump containerd to v1.7.20 [(#10662)](https://github.com/k3s-io/k3s/pull/10662) +* Add tolerations support for DaemonSet pods [(#10705)](https://github.com/k3s-io/k3s/pull/10705) + * **New Feature**: Users can now define Kubernetes tolerations for ServiceLB DaemonSet directly in the `svccontroller.k3s.cattle.io/tolerations` annotation on services. +* Update to v1.28.13-k3s1 and Go 1.22.5 [(#10719)](https://github.com/k3s-io/k3s/pull/10719) + +----- +## Release [v1.28.12+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.12+k3s1) + + +This release updates Kubernetes to v1.28.12, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v12811). + +### Changes since v1.28.11+k3s2: + +* Backports for 2024-07 release cycle [(#10499)](https://github.com/k3s-io/k3s/pull/10499) + * Bump k3s-root to v0.14.0 + * Bump github.com/hashicorp/go-retryablehttp from 0.7.4 to 0.7.7 + * Bump Local Path Provisioner version + * Ensure remotedialer kubelet connections use kubelet bind address + * Chore: Bump Trivy version + * Add etcd s3 config secret implementation +* July Test Backports [(#10509)](https://github.com/k3s-io/k3s/pull/10509) +* Update to v1.28.12-k3s1 and Go 1.22.5 [(#10541)](https://github.com/k3s-io/k3s/pull/10541) +* Fix issues loading data-dir value from env vars or dropping config files [(#10598)](https://github.com/k3s-io/k3s/pull/10598) + +----- +## Release [v1.28.11+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.28.11+k3s2) + + +This release updates Kubernetes to v1.28.11, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v12811). + +### Changes since v1.28.11+k3s1: + +* Update flannel to v0.25.4 and fixed issue with IPv6 mask [(#10428)](https://github.com/k3s-io/k3s/pull/10428) + +----- +## Release [v1.28.11+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.11+k3s1) + + +This release updates Kubernetes to v1.28.11, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v12810). + +### Changes since v1.28.10+k3s1: + +* Replace deprecated ruby function [(#10090)](https://github.com/k3s-io/k3s/pull/10090) +* Fix bug when using tailscale config by file [(#10144)](https://github.com/k3s-io/k3s/pull/10144) +* Bump flannel version to v0.25.2 [(#10221)](https://github.com/k3s-io/k3s/pull/10221) +* Update kube-router version to v2.1.2 [(#10182)](https://github.com/k3s-io/k3s/pull/10182) +* Improve tailscale test & add extra log in e2e tests [(#10213)](https://github.com/k3s-io/k3s/pull/10213) +* Backports for 2024-06 release cycle [(#10258)](https://github.com/k3s-io/k3s/pull/10258) + * Add WithSkipMissing to not fail import on missing blobs + * Use fixed stream server bind address for cri-dockerd + * Switch stargz over to cri registry config_path + * Bump to containerd v1.7.17, etcd v3.5.13 + * Bump spegel version + * Fix issue with externalTrafficPolicy: Local for single-stack services on dual-stack nodes + * ServiceLB now sets the priorityClassName on svclb pods to `system-node-critical` by default. 
This can be overridden on a per-service basis via the `svccontroller.k3s.cattle.io/priorityclassname` annotation. + * Bump minio-go to v7.0.70 + * Bump kine to v0.11.9 to fix pagination + * Update valid resolv conf + * Add missing kernel config check + * Symlinked sub-directories are now respected when scanning Auto-Deploying Manifests (AddOns) + * Fix bug: allow helm controller set owner reference + * Bump klipper-helm image for tls secret support + * Fix issue with k3s-etcd informers not starting + * `--Enable-pprof` can now be set on agents to enable the debug/pprof endpoints. When set, agents will listen on the supervisor port. + * `--Supervisor-metrics` can now be set on servers to enable serving internal metrics on the supervisor endpoint; when set agents will listen on the supervisor port. + * Fix netpol crash when node remains tainted uninitialized + * The embedded load-balancer will now fall back to trying all servers with health-checks ignored, if all servers have been marked unavailable due to failed health checks. +* More backports for 2024-06 release cycle [(#10289)](https://github.com/k3s-io/k3s/pull/10289) +* Add snapshot retention etcd-s3-folder fix [(#10315)](https://github.com/k3s-io/k3s/pull/10315) +* Add test for `isValidResolvConf` (#10302) [(#10331)](https://github.com/k3s-io/k3s/pull/10331) +* Fix race condition panic in loadbalancer.nextServer [(#10323)](https://github.com/k3s-io/k3s/pull/10323) +* Fix typo, use `rancher/permissions` [(#10299)](https://github.com/k3s-io/k3s/pull/10299) +* Update Kubernetes to v1.28.11 [(#10347)](https://github.com/k3s-io/k3s/pull/10347) +* Fix agent supervisor port using apiserver port instead [(#10355)](https://github.com/k3s-io/k3s/pull/10355) +* Fix issue that allowed multiple simultaneous snapshots to be allowed [(#10377)](https://github.com/k3s-io/k3s/pull/10377) + +----- +## Release [v1.28.10+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.10+k3s1) + + +This release updates Kubernetes to v1.28.10, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1289). + +### Changes since v1.28.9+k3s1: + +* Bump E2E opensuse leap to 15.6, fix btrfs test [(#10095)](https://github.com/k3s-io/k3s/pull/10095) +* Windows changes [(#10114)](https://github.com/k3s-io/k3s/pull/10114) +* Update to v1.28.10-k3s1 [(#10098)](https://github.com/k3s-io/k3s/pull/10098) + +----- +## Release [v1.28.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.9+k3s1) + + +This release updates Kubernetes to v1.28.9, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1288). 
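+Several entries in the backport list below touch etcd snapshot management, which since the rework is performed by the server process with the CLI acting as a client. A minimal usage sketch, assuming embedded etcd and the default snapshot directory:
+
+```bash
+# Take an on-demand snapshot (executed by the running server)
+k3s etcd-snapshot save
+
+# List the snapshots known to the server
+k3s etcd-snapshot list
+
+# Prune old snapshots, keeping the five most recent
+k3s etcd-snapshot prune --snapshot-retention 5
+```
+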
+ +### Changes since v1.28.8+k3s1: + +* Add a new error when kine is with disable apiserver or disable etcd [(#9804)](https://github.com/k3s-io/k3s/pull/9804) +* Remove old pinned dependencies [(#9827)](https://github.com/k3s-io/k3s/pull/9827) +* Transition from deprecated pointer library to ptr [(#9824)](https://github.com/k3s-io/k3s/pull/9824) +* Golang caching and E2E ubuntu 23.10 [(#9821)](https://github.com/k3s-io/k3s/pull/9821) +* Add tls for kine [(#9849)](https://github.com/k3s-io/k3s/pull/9849) +* Bump spegel to v0.0.20-k3s1 [(#9880)](https://github.com/k3s-io/k3s/pull/9880) +* Backports for 2024-04 release cycle [(#9911)](https://github.com/k3s-io/k3s/pull/9911) + * Send error response if member list cannot be retrieved + * The k3s stub cloud provider now respects the kubelet's requested provider-id, instance type, and topology labels + * Fix error when image has already been pulled + * Add /etc/passwd and /etc/group to k3s docker image + * Fix etcd snapshot reconcile for agentless servers + * Add health-check support to loadbalancer + * Add certificate expiry check, events, and metrics + * Add workaround for containerd hosts.toml bug when passing config for default registry endpoint + * Add supervisor cert/key to rotate list + * The embedded containerd has been bumped to v1.7.15 + * The embedded cri-dockerd has been bumped to v0.3.12 + * The `k3s etcd-snapshot` command has been reworked for improved consistency. All snapshots operations are now performed by the server process, with the CLI acting as a client to initiate and report results. As a side effect, the CLI is now less noisy when managing snapshots. + * Improve etcd load-balancer startup behavior + * Actually fix agent certificate rotation + * Traefik has been bumped to v2.10.7. + * Traefik pod annotations are now set properly in the default chart values. + * The system-default-registry value now supports RFC2732 IPv6 literals. + * The local-path provisioner now defaults to creating `local` volumes, instead of `hostPath`. +* Allow LPP to read helper logs [(#9938)](https://github.com/k3s-io/k3s/pull/9938) +* Update kube-router to v2.1.0 [(#9942)](https://github.com/k3s-io/k3s/pull/9942) +* Update to v1.28.9-k3s1 and Go 1.21.9 [(#9959)](https://github.com/k3s-io/k3s/pull/9959) +* Fix on-demand snapshots timing out; not honoring folder [(#9994)](https://github.com/k3s-io/k3s/pull/9994) +* Make /db/info available anonymously from localhost [(#10002)](https://github.com/k3s-io/k3s/pull/10002) + +----- +## Release [v1.28.8+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.8+k3s1) + + +This release updates Kubernetes to v1.28.8, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1287). 
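+Among the backports below is wildcard registry support in `registries.yaml`. A sketch of a wildcard mirror entry, with a hypothetical internal endpoint standing in for a real one:
+
+```bash
+# mirror.example.internal is a placeholder; point this at your own mirror
+cat <<'EOF' > /etc/rancher/k3s/registries.yaml
+mirrors:
+  "*":
+    endpoint:
+      - "https://mirror.example.internal"
+EOF
+systemctl restart k3s
+```
+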
+ +### Changes since v1.28.7+k3s1: + +* Add an integration test for flannel-backend=none [(#9608)](https://github.com/k3s-io/k3s/pull/9608) +* Install and Unit test backports [(#9641)](https://github.com/k3s-io/k3s/pull/9641) +* Update klipper-lb image version [(#9605)](https://github.com/k3s-io/k3s/pull/9605) +* Chore(deps): Remediating CVE-2023-45142 CVE-2023-48795 [(#9647)](https://github.com/k3s-io/k3s/pull/9647) +* Adjust first node-ip based on configured clusterCIDR [(#9631)](https://github.com/k3s-io/k3s/pull/9631) +* Improve tailscale e2e test [(#9653)](https://github.com/k3s-io/k3s/pull/9653) +* Backports for 2024-03 release cycle [(#9669)](https://github.com/k3s-io/k3s/pull/9669) + * Fix: use correct wasm shims names + * The embedded flannel cni-plugin binary is now built and versioned separate from the rest of the cni plugins and the embedded flannel controller. + * Bump spegel to v0.0.18-k3s3 + * Adds wildcard registry support + * Fixes issue with excessive CPU utilization while waiting for containerd to start + * Add env var to allow spegel mirroring of latest tag + * Tweak netpol node wait logs + * Fix coredns NodeHosts on dual-stack clusters + * Bump helm-controller/klipper-helm versions + * Fix snapshot prune + * Fix issue with etcd node name missing hostname + * Rootless mode should also bind service nodePort to host for LoadBalancer type, matching UX of rootful mode. + * To enable raw output for the `check-config` subcommand, you may now set NO_COLOR=1 + * Fix additional corner cases in registries handling + * Bump metrics-server to v0.7.0 + * K3s will now warn and suppress duplicate entries in the mirror endpoint list for a registry. Containerd does not support listing the same endpoint multiple times as a mirror for a single upstream registry. +* Docker and E2E Test Backports [(#9707)](https://github.com/k3s-io/k3s/pull/9707) +* Fix wildcard entry upstream fallback [(#9733)](https://github.com/k3s-io/k3s/pull/9733) +* Update to v1.28.8-k3s1 and Go 1.21.8 [(#9746)](https://github.com/k3s-io/k3s/pull/9746) + +----- +## Release [v1.28.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.7+k3s1) + + +This release updates Kubernetes to v1.28.7, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1286). + +### Changes since v1.28.6+k3s2: + +* Chore: bump Local Path Provisioner version [(#9426)](https://github.com/k3s-io/k3s/pull/9426) +* Bump cri-dockerd to fix compat with Docker Engine 25 [(#9293)](https://github.com/k3s-io/k3s/pull/9293) +* Auto Dependency Bump [(#9419)](https://github.com/k3s-io/k3s/pull/9419) +* Runtimes refactor using exec.LookPath [(#9431)](https://github.com/k3s-io/k3s/pull/9431) + * Directories containing runtimes need to be included in the $PATH environment variable for effective runtime detection. 
+* Changed how lastHeartBeatTime works in the etcd condition [(#9424)](https://github.com/k3s-io/k3s/pull/9424) +* Bump Flannel v0.24.2 + remove multiclustercidr [(#9401)](https://github.com/k3s-io/k3s/pull/9401) +* Allow executors to define containerd and docker behavior [(#9254)](https://github.com/k3s-io/k3s/pull/9254) +* Update Kube-router to v2.0.1 [(#9404)](https://github.com/k3s-io/k3s/pull/9404) +* Backports for 2024-02 release cycle [(#9462)](https://github.com/k3s-io/k3s/pull/9462) +* Enable longer http timeout requests [(#9444)](https://github.com/k3s-io/k3s/pull/9444) +* Test_UnitApplyContainerdQoSClassConfigFileIfPresent [(#9440)](https://github.com/k3s-io/k3s/pull/9440) +* Support PR testing installs [(#9469)](https://github.com/k3s-io/k3s/pull/9469) +* Update Kubernetes to v1.28.7 [(#9492)](https://github.com/k3s-io/k3s/pull/9492) +* Fix drone publish for arm [(#9508)](https://github.com/k3s-io/k3s/pull/9508) +* Remove failing Drone step [(#9516)](https://github.com/k3s-io/k3s/pull/9516) +* Restore original order of agent startup functions [(#9545)](https://github.com/k3s-io/k3s/pull/9545) +* Fix netpol startup when flannel is disabled [(#9578)](https://github.com/k3s-io/k3s/pull/9578) + +----- +## Release [v1.28.6+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.28.6+k3s2) + + +This release updates Kubernetes to v1.28.6, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1285). + +**Important Notes** + +Addresses the runc CVE: [CVE-2024-21626](https://nvd.nist.gov/vuln/detail/CVE-2024-21626) by updating runc to v1.1.12. + +### Changes since v1.28.5+k3s1: + +* Add a retry around updating a secrets-encrypt node annotations [(#9125)](https://github.com/k3s-io/k3s/pull/9125) +* Wait for taint to be gone in the node before starting the netpol controller [(#9175)](https://github.com/k3s-io/k3s/pull/9175) +* Etcd condition [(#9181)](https://github.com/k3s-io/k3s/pull/9181) +* Backports for 2024-01 [(#9203)](https://github.com/k3s-io/k3s/pull/9203) +* Pin opa version for missing dependency chain [(#9216)](https://github.com/k3s-io/k3s/pull/9216) +* Added support for env *_PROXY variables for agent loadbalancer [(#9206)](https://github.com/k3s-io/k3s/pull/9206) +* Etcd node is nil [(#9228)](https://github.com/k3s-io/k3s/pull/9228) +* Update to v1.28.6 and Go 1.20.13 [(#9260)](https://github.com/k3s-io/k3s/pull/9260) +* Use `ipFamilyPolicy: RequireDualStack` for dual-stack kube-dns [(#9269)](https://github.com/k3s-io/k3s/pull/9269) +* Backports for 2024-01 k3s2 [(#9336)](https://github.com/k3s-io/k3s/pull/9336) + * Bump runc to v1.1.12 and helm-controller to v0.15.7 + * Fix handling of bare hostname or IP as endpoint address in registries.yaml +* Bump helm-controller to fix issue with ChartContent [(#9346)](https://github.com/k3s-io/k3s/pull/9346) + +----- +## Release [v1.28.5+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.5+k3s1) + + +This release updates Kubernetes to v1.28.5, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1284). 
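+The changes below add packaged runtime classes for wasm/nvidia/crun and a containerd default-runtime flag. A sketch of selecting the nvidia runtime per pod, assuming the NVIDIA container toolkit is installed so that the `nvidia` runtime class is registered:
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cuda-smoke-test   # hypothetical example pod
+spec:
+  runtimeClassName: nvidia
+  containers:
+  - name: cuda
+    image: nvcr.io/nvidia/cuda:12.3.1-base-ubuntu22.04
+    command: ["nvidia-smi"]
+EOF
+```
+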
+ +### Changes since v1.28.4+k3s1: + +* Remove s390x steps temporarily since runners are disabled [(#8983)](https://github.com/k3s-io/k3s/pull/8983) +* Remove s390x from manifest [(#8998)](https://github.com/k3s-io/k3s/pull/8998) +* Fix overlapping address range [(#8913)](https://github.com/k3s-io/k3s/pull/8913) +* Modify CONTRIBUTING.md guide [(#8954)](https://github.com/k3s-io/k3s/pull/8954) +* Nov 2023 stable channel update [(#9022)](https://github.com/k3s-io/k3s/pull/9022) +* Default runtime and runtime classes for wasm/nvidia/crun [(#8936)](https://github.com/k3s-io/k3s/pull/8936) + * Added runtime classes for wasm/nvidia/crun + * Added default runtime flag for containerd +* Bump containerd/runc to v1.7.10-k3s1/v1.1.10 [(#8962)](https://github.com/k3s-io/k3s/pull/8962) +* Allow setting default-runtime on servers [(#9027)](https://github.com/k3s-io/k3s/pull/9027) +* Bump containerd to v1.7.11 [(#9040)](https://github.com/k3s-io/k3s/pull/9040) +* Update to v1.28.5-k3s1 [(#9081)](https://github.com/k3s-io/k3s/pull/9081) + +----- +## Release [v1.28.4+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.28.4+k3s2) + + +This release updates Kubernetes to v1.28.4, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1283). + +### Changes since v1.28.3+k3s2: + +* Update channels latest to v1.27.7+k3s2 [(#8799)](https://github.com/k3s-io/k3s/pull/8799) +* Add etcd status condition [(#8724)](https://github.com/k3s-io/k3s/pull/8724) + * Now the user can see the etcd status from each node in a simple way +* ADR for etcd status [(#8355)](https://github.com/k3s-io/k3s/pull/8355) +* Wasm shims detection [(#8751)](https://github.com/k3s-io/k3s/pull/8751) + * Automatic discovery of WebAssembly runtimes +* Add warning for removal of multiclustercidr flag [(#8758)](https://github.com/k3s-io/k3s/pull/8758) +* Improve dualStack log [(#8798)](https://github.com/k3s-io/k3s/pull/8798) +* Optimize: Simplify and clean up Dockerfile [(#8244)](https://github.com/k3s-io/k3s/pull/8244) +* Add: timezone info in image [(#8764)](https://github.com/k3s-io/k3s/pull/8764) + * New timezone info in the Docker image allows the use of `spec.timeZone` in CronJobs +* Bump kine to fix nats, postgres, and watch issues [(#8778)](https://github.com/k3s-io/k3s/pull/8778) + * Bumped kine to v0.11.0 to resolve issues with postgres and NATS, fix performance of watch channels under heavy load, and improve compatibility with the reference implementation. +* QoS-class resource configuration [(#8726)](https://github.com/k3s-io/k3s/pull/8726) + * Containerd may now be configured to use rdt or blockio configuration by defining `rdt_config.yaml` or `blockio_config.yaml` files. +* Add agent flag disable-apiserver-lb [(#8717)](https://github.com/k3s-io/k3s/pull/8717) + * Add agent flag disable-apiserver-lb; when set, the agent will not start the load balancer proxy. 
+* Force umount for NFS mount (like with longhorn) [(#8521)](https://github.com/k3s-io/k3s/pull/8521) +* General updates to README [(#8786)](https://github.com/k3s-io/k3s/pull/8786) +* Fix wrong warning from restorecon in install script [(#8871)](https://github.com/k3s-io/k3s/pull/8871) +* Fix issue with snapshot metadata configmap [(#8835)](https://github.com/k3s-io/k3s/pull/8835) + * Omit snapshot list configmap entries for snapshots without extra metadata +* Skip initial datastore reconcile during cluster-reset [(#8861)](https://github.com/k3s-io/k3s/pull/8861) +* Tweaked order of ingress IPs in ServiceLB [(#8711)](https://github.com/k3s-io/k3s/pull/8711) + * Improved ingress IP ordering from ServiceLB +* Disable helm CRD installation for disable-helm-controller [(#8702)](https://github.com/k3s-io/k3s/pull/8702) +* More improves for K3s patch release docs [(#8800)](https://github.com/k3s-io/k3s/pull/8800) +* Update install.sh sha256sum [(#8885)](https://github.com/k3s-io/k3s/pull/8885) +* Add jitter to client config retry to avoid hammering servers when they are starting up [(#8863)](https://github.com/k3s-io/k3s/pull/8863) +* Handle nil pointer when runtime core is not ready in etcd [(#8886)](https://github.com/k3s-io/k3s/pull/8886) +* Bump dynamiclistener; reduce snapshot controller log spew [(#8894)](https://github.com/k3s-io/k3s/pull/8894) + * Bumped dynamiclistener to address a race condition that could cause a server to fail to sync its certificates into the Kubernetes secret + * Reduced etcd snapshot log spam during initial cluster startup +* Remove depends_on for e2e step; fix cert rotate e2e [(#8906)](https://github.com/k3s-io/k3s/pull/8906) +* Fix etcd snapshot S3 issues [(#8926)](https://github.com/k3s-io/k3s/pull/8926) + * Don't apply S3 retention if S3 client failed to initialize + * Don't request metadata when listing S3 snapshots + * Print key instead of file path in snapshot metadata log message +* Update to v1.28.4 and Go to v1.20.11 [(#8920)](https://github.com/k3s-io/k3s/pull/8920) +* Remove s390x steps temporarily since runners are disabled [(#8983)](https://github.com/k3s-io/k3s/pull/8983) +* Remove s390x from manifest [(#8998)](https://github.com/k3s-io/k3s/pull/8998) + +----- +## Release [v1.28.3+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.28.3+k3s2) + + +This release updates Kubernetes to v1.28.3, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1283). 
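+The v1.28.4+k3s2 notes above call out timezone data in the Docker image, which enables `spec.timeZone` on CronJobs. A brief sketch with hypothetical names:
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: nightly-report   # hypothetical job
+spec:
+  schedule: "0 3 * * *"
+  timeZone: "America/Sao_Paulo"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          restartPolicy: OnFailure
+          containers:
+          - name: report
+            image: busybox:1.36.1
+            command: ["date"]
+EOF
+```
+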
+ +### Changes since v1.28.3+k3s1: + +* Restore selinux context systemd unit file [(#8593)](https://github.com/k3s-io/k3s/pull/8593) +* Update channel to v1.27.7+k3s1 [(#8753)](https://github.com/k3s-io/k3s/pull/8753) +* Bump Sonobuoy version [(#8710)](https://github.com/k3s-io/k3s/pull/8710) +* Bump Trivy version [(#8739)](https://github.com/k3s-io/k3s/pull/8739) +* Fix: Access outer scope .SystemdCgroup [(#8761)](https://github.com/k3s-io/k3s/pull/8761) + * Fixed failing to start with nvidia-container-runtime +* Upgrade traefik chart to v25.0.0 [(#8771)](https://github.com/k3s-io/k3s/pull/8771) +* Update traefik to fix registry value [(#8792)](https://github.com/k3s-io/k3s/pull/8792) +* Don't use iptables-save/iptables-restore if it will corrupt rules [(#8795)](https://github.com/k3s-io/k3s/pull/8795) + +----- +## Release [v1.28.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.3+k3s1) + + +This release updates Kubernetes to v1.28.3, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1282). + +### Changes since v1.28.2+k3s1: + +* Fix error reporting [(#8250)](https://github.com/k3s-io/k3s/pull/8250) +* Add context to flannel errors [(#8284)](https://github.com/k3s-io/k3s/pull/8284) +* Update channel, September patch release [(#8397)](https://github.com/k3s-io/k3s/pull/8397) +* Add missing link to drone in documentation [(#8295)](https://github.com/k3s-io/k3s/pull/8295) +* Include the interface name in the error message [(#8346)](https://github.com/k3s-io/k3s/pull/8346) +* Add extraArgs to vpn provider [(#8354)](https://github.com/k3s-io/k3s/pull/8354) + * Allow passing extra args to the vpn provider +* Disable HTTP on main etcd client port [(#8402)](https://github.com/k3s-io/k3s/pull/8402) + * Embedded etcd no longer serves http requests on the client port, only grpc. This addresses a performance issue that could cause watch stream starvation under load. For more information, see https://github.com/etcd-io/etcd/issues/15402 +* Server token rotation [(#8215)](https://github.com/k3s-io/k3s/pull/8215) +* Fix issues with etcd member removal after reset [(#8392)](https://github.com/k3s-io/k3s/pull/8392) + * Fixed an issue that could cause k3s to attempt to remove members from the etcd cluster immediately following a cluster-reset/restore, if they were queued for removal at the time the snapshot was taken. +* Fix gofmt error [(#8439)](https://github.com/k3s-io/k3s/pull/8439) +* Added advertise address integration test [(#8344)](https://github.com/k3s-io/k3s/pull/8344) +* Added cluster reset from non bootstrap nodes on snapshot restore e2e test [(#8292)](https://github.com/k3s-io/k3s/pull/8292) +* Fix .github regex to skip drone runs on gh action bumps [(#8433)](https://github.com/k3s-io/k3s/pull/8433) +* Added error when cluster reset while using server flag [(#8385)](https://github.com/k3s-io/k3s/pull/8385) + * The user will receive an error when using --cluster-reset with the --server flag +* Update kube-router [(#8423)](https://github.com/k3s-io/k3s/pull/8423) + * Update kube-router to v2.0.0-rc7 to fix performance issues +* Add SHA256 signatures of the install script [(#8312)](https://github.com/k3s-io/k3s/pull/8312) + * Add SHA256 signatures of the install script. +* Add --image-service-endpoint flag [(#8279)](https://github.com/k3s-io/k3s/pull/8279) + * Add `--image-service-endpoint` flag to specify an external image service socket. 
+* Don't ignore assets in home dir if system assets exist [(#8458)](https://github.com/k3s-io/k3s/pull/8458) +* Pass SystemdCgroup setting through to nvidia runtime options [(#8470)](https://github.com/k3s-io/k3s/pull/8470) + * Fixed issue that would cause pods using nvidia container runtime to be killed after a few seconds, when using newer versions of nvidia-container-toolkit. +* Improve release docs - updated [(#8414)](https://github.com/k3s-io/k3s/pull/8414) +* Take IPFamily precedence based on order [(#8460)](https://github.com/k3s-io/k3s/pull/8460) +* Fix spellcheck problem [(#8507)](https://github.com/k3s-io/k3s/pull/8507) +* Network defaults are duplicated, remove one [(#8523)](https://github.com/k3s-io/k3s/pull/8523) +* Fix slemicro check for selinux [(#8526)](https://github.com/k3s-io/k3s/pull/8526) +* Update install.sh.sha256sum [(#8566)](https://github.com/k3s-io/k3s/pull/8566) +* System agent push tags fix [(#8568)](https://github.com/k3s-io/k3s/pull/8568) +* Fixed tailscale node IP dualstack mode in case of IPv4 only node [(#8524)](https://github.com/k3s-io/k3s/pull/8524) +* Server Token Rotation [(#8265)](https://github.com/k3s-io/k3s/pull/8265) + * Users can now rotate the server token using `k3s token rotate -t --new-token `. After the command succeeds, all server nodes must be restarted with the new token. +* E2E Domain Drone Cleanup [(#8579)](https://github.com/k3s-io/k3s/pull/8579) +* Bump containerd to v1.7.7-k3s1 [(#8604)](https://github.com/k3s-io/k3s/pull/8604) +* Bump busybox to v1.36.1 [(#8602)](https://github.com/k3s-io/k3s/pull/8602) +* Migrate to using custom resource to store etcd snapshot metadata [(#8064)](https://github.com/k3s-io/k3s/pull/8064) +* Switch build target from main.go to a package. [(#8342)](https://github.com/k3s-io/k3s/pull/8342) +* Use IPv6 in case is the first configured IP with dualstack [(#8581)](https://github.com/k3s-io/k3s/pull/8581) +* Bump traefik, golang.org/x/net, google.golang.org/grpc [(#8624)](https://github.com/k3s-io/k3s/pull/8624) +* Update kube-router package in build script [(#8630)](https://github.com/k3s-io/k3s/pull/8630) +* Add etcd-only/control-plane-only server test and fix control-plane-only server crash [(#8638)](https://github.com/k3s-io/k3s/pull/8638) +* Use `version.Program` not K3s in token rotate logs [(#8653)](https://github.com/k3s-io/k3s/pull/8653) +* Windows Port [(#7259)](https://github.com/k3s-io/k3s/pull/7259) +* Fix CloudDualStackNodeIPs feature-gate inconsistency [(#8667)](https://github.com/k3s-io/k3s/pull/8667) +* Re-enable etcd endpoint auto-sync [(#8675)](https://github.com/k3s-io/k3s/pull/8675) +* Manually requeue configmap reconcile when no nodes have reconciled snapshots [(#8683)](https://github.com/k3s-io/k3s/pull/8683) +* Update to v1.28.3 and Go to v1.20.10 [(#8682)](https://github.com/k3s-io/k3s/pull/8682) +* Fix s3 snapshot restore [(#8729)](https://github.com/k3s-io/k3s/pull/8729) + +----- +## Release [v1.28.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.2+k3s1) + + +This release updates Kubernetes to v1.28.2, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1281). 
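+The server token rotation introduced above can be driven entirely from the CLI. A sketch, with placeholder token values:
+
+```bash
+# OLD_TOKEN and NEW_TOKEN are placeholders for your actual values
+k3s token rotate --token "${OLD_TOKEN}" --new-token "${NEW_TOKEN}"
+
+# Once the rotation succeeds, restart every server node with the new token
+systemctl restart k3s
+```
+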
+ +### Changes since v1.28.1+k3s1: + +* Update channel for version v1.28 [(#8305)](https://github.com/k3s-io/k3s/pull/8305) +* Bump kine to v0.10.3 [(#8323)](https://github.com/k3s-io/k3s/pull/8323) +* Update to v1.28.2 and go v1.20.8 [(#8364)](https://github.com/k3s-io/k3s/pull/8364) + * Bump embedded containerd to v1.7.6 + * Bump embedded stargz-snapshotter plugin to latest + * Fixed intermittent drone CI failures due to race conditions in test environment setup scripts + * Fixed CI failures due to changes to api discovery changes in Kubernetes 1.28 + +----- +## Release [v1.28.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.28.1+k3s1) + +This release is K3S's first in the v1.28 line. This release updates Kubernetes to v1.28.1. + +:::warning Important +This release includes remediation for CVE-2023-32187, a potential Denial of Service attack vector on K3s servers. See https://github.com/k3s-io/k3s/security/advisories/GHSA-m4hf-6vgr-75r2 for more information, including documentation on changes in behavior that harden clusters against this vulnerability. +::: + +:::danger Critical Regression +Kubernetes v1.28 contains a critical regression ([kubernetes/kubernetes#120247](https://github.com/kubernetes/kubernetes/issues/120247)) that causes init containers to run at the same time as app containers following a restart of the node. This issue will be fixed in v1.28.2. We do not recommend using K3s v1.28 at this time if your application depends on init containers. +::: + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1270). + +### Changes since v1.27.5+k3s1: + +* Update to v1.28.1 [(#8239)](https://github.com/k3s-io/k3s/pull/8239) +* CLI Removal for v1.28.0 [(#8203)](https://github.com/k3s-io/k3s/pull/8203) +* Secrets Encryption V3 [(#8111)](https://github.com/k3s-io/k3s/pull/8111) +* Add new CLI flag to disable TLS SAN CN filtering [(#8252)](https://github.com/k3s-io/k3s/pull/8252) + * Added a new `--tls-san-security` option. +* Add RWMutex to address controller [(#8268)](https://github.com/k3s-io/k3s/pull/8268) + +----- diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.29.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.29.X.md new file mode 100644 index 000000000..1e8dcbdf1 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.29.X.md @@ -0,0 +1,485 @@ +--- +hide_table_of_contents: true +sidebar_position: 4 +--- + +# v1.29.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#urgent-upgrade-notes). 
+::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.29.12+k3s1](v1.29.X.md#release-v12912k3s1) | Dec 18 2024| [v1.29.12](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v12912) | [v0.13.5](https://github.com/k3s-io/kine/releases/tag/v0.13.5) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.16-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.16-k3s1) | [v1.7.23-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.23-k3s2) | [v1.2.1](https://github.com/opencontainers/runc/releases/tag/v1.2.1) | [v0.25.7](https://github.com/flannel-io/flannel/releases/tag/v0.25.7) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.12.0](https://github.com/coredns/coredns/releases/tag/v1.12.0) | [v0.15.15](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.15) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.29.11+k3s1](v1.29.X.md#release-v12911k3s1) | Dec 04 2024| [v1.29.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v12911) | [v0.13.5](https://github.com/k3s-io/kine/releases/tag/v0.13.5) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.16-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.16-k3s1) | [v1.7.23-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.23-k3s2) | [v1.2.1](https://github.com/opencontainers/runc/releases/tag/v1.2.1) | [v0.25.7](https://github.com/flannel-io/flannel/releases/tag/v0.25.7) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.15.15](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.15) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.29.10+k3s1](v1.29.X.md#release-v12910k3s1) | Oct 26 2024| [v1.29.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v12910) | [v0.13.2](https://github.com/k3s-io/kine/releases/tag/v0.13.2) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.22-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.22-k3s1) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.15.15](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.15) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.29.9+k3s1](v1.29.X.md#release-v1299k3s1) | Sep 19 2024| [v1.29.9](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1299) | [v0.12.0](https://github.com/k3s-io/kine/releases/tag/v0.12.0) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | 
[v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.21-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.21-k3s2) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.8](https://github.com/traefik/traefik/releases/tag/v2.11.8) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.15.13](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.13) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.29.8+k3s1](v1.29.X.md#release-v1298k3s1) | Aug 21 2024| [v1.29.8](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1298) | [v0.11.11](https://github.com/k3s-io/kine/releases/tag/v0.11.11) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.20-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.20-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.29.7+k3s1](v1.29.X.md#release-v1297k3s1) | Jul 31 2024| [v1.29.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1297) | [v0.11.11](https://github.com/k3s-io/kine/releases/tag/v0.11.11) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.29.6+k3s2](v1.29.X.md#release-v1296k3s2) | Jul 03 2024| [v1.29.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1296) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1) | [v1.1.12-](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | 
[v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) | +| [v1.29.6+k3s1](v1.29.X.md#release-v1296k3s1) | Jun 25 2024| [v1.29.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1296) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.2](https://github.com/flannel-io/flannel/releases/tag/v0.25.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.10](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.10) | [v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) | +| [v1.29.5+k3s1](v1.29.X.md#release-v1295k3s1) | May 22 2024| [v1.29.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1295) | [v0.11.7](https://github.com/k3s-io/kine/releases/tag/v0.11.7) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.29.4+k3s1](v1.29.X.md#release-v1294k3s1) | Apr 25 2024| [v1.29.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1294) | [v0.11.7](https://github.com/k3s-io/kine/releases/tag/v0.11.7) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.29.3+k3s1](v1.29.X.md#release-v1293k3s1) | Mar 25 2024| [v1.29.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1293) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | 
[v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.29.2+k3s1](v1.29.X.md#release-v1292k3s1) | Feb 29 2024| [v1.29.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1292) | [v0.11.4](https://github.com/k3s-io/kine/releases/tag/v0.11.4) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.12-k3s1](https://github.com/k3s-io/runc/releases/tag/v1.1.12-k3s1) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) | +| [v1.29.1+k3s2](v1.29.X.md#release-v1291k3s2) | Feb 06 2024| [v1.29.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1291) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.12-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.1.12-k3s1) | [v0.24.0](https://github.com/flannel-io/flannel/releases/tag/v0.24.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.8](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.8) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | +| [v1.29.0+k3s1](v1.29.X.md#release-v1290k3s1) | Dec 22 2023| [v1.29.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#v1290) | [v0.11.0](https://github.com/k3s-io/kine/releases/tag/v0.11.0) | [3.42.0](https://sqlite.org/releaselog/3_42_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.11-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.11-k3s2) | [v1.1.10](https://github.com/opencontainers/runc/releases/tag/v1.1.10) | [v0.24.0](https://github.com/flannel-io/flannel/releases/tag/v0.24.0) | [v0.6.3](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.6.3) | [v2.10.5](https://github.com/traefik/traefik/releases/tag/v2.10.5) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.4) | [v0.0.24](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.24) | + +
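+When stepping through the releases in the table above, the install script can pin either a channel or an exact version. A sketch using the documented environment variables; the values shown are examples to adjust:
+
+```bash
+# Track the v1.29 channel
+curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.29 sh -
+
+# Or pin one release from the table above
+curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.29.12+k3s1 sh -
+```
+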
+ +## Release [v1.29.12+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.12+k3s1) + + +This release updates Kubernetes to v1.29.12, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v12911). + +### Changes since v1.29.11+k3s1: + +* Fix secrets-encrypt reencrypt timeout error [(#11440)](https://github.com/k3s-io/k3s/pull/11440) +* Remove experimental from embedded-registry flag [(#11446)](https://github.com/k3s-io/k3s/pull/11446) +* Update coredns to 1.12.0 [(#11456)](https://github.com/k3s-io/k3s/pull/11456) +* Rework loadbalancer server selection logic [(#11459)](https://github.com/k3s-io/k3s/pull/11459) + * The embedded client loadbalancer that handles connectivity to control-plane elements has been extensively reworked for improved performance, reliability, and observability. +* Add node-internal-dns/node-external-dns address pass-through support … [(#11466)](https://github.com/k3s-io/k3s/pull/11466) +* Update to v1.29.12-k3s1 and Go 1.22.9 [(#11460)](https://github.com/k3s-io/k3s/pull/11460) + +----- +## Release [v1.29.11+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.11+k3s1) + + +This release updates Kubernetes to v1.29.11, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v12910). + +### Changes since v1.29.10+k3s1: + +* Backport E2E GHA fixes [(#11229)](https://github.com/k3s-io/k3s/pull/11229) +* Backports for 2024-11 [(#11263)](https://github.com/k3s-io/k3s/pull/11263) +* Update flannel and base cni plugins version [(#11249)](https://github.com/k3s-io/k3s/pull/11249) +* Bump to latest k3s-root version in scripts/version.sh [(#11300)](https://github.com/k3s-io/k3s/pull/11300) +* More backports for 2024-11 [(#11309)](https://github.com/k3s-io/k3s/pull/11309) +* Fix issue with loadbalancer failover to default server [(#11326)](https://github.com/k3s-io/k3s/pull/11326) +* Update Kubernetes to v1.29.11-k3s1 [(#11370)](https://github.com/k3s-io/k3s/pull/11370) +* Bump containerd to -k3s2 to fix rewrites [(#11405)](https://github.com/k3s-io/k3s/pull/11405) + +----- +## Release [v1.29.10+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.10+k3s1) + + +This release updates Kubernetes to v1.29.10, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1299). 
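+With the embedded registry mirror no longer marked experimental (see the v1.29.12 notes above), enabling it is a server config toggle plus a mirror list. A sketch assuming default config paths; the exact mirror entries are an assumption to adapt:
+
+```bash
+# Enable the embedded registry mirror on servers
+cat <<'EOF' >> /etc/rancher/k3s/config.yaml
+embedded-registry: true
+EOF
+
+# Opt docker.io images into distributed mirroring
+cat <<'EOF' > /etc/rancher/k3s/registries.yaml
+mirrors:
+  docker.io:
+EOF
+systemctl restart k3s
+```
+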
+ +### Changes since v1.29.9+k3s1: + +* Add int test for flannel-ipv6masq [(#10905)](https://github.com/k3s-io/k3s/pull/10905) +* Bump Wharfie to v0.6.7 [(#10976)](https://github.com/k3s-io/k3s/pull/10976) +* Add user path to runtimes search [(#11004)](https://github.com/k3s-io/k3s/pull/11004) +* Add e2e test for advanced fields in services [(#11021)](https://github.com/k3s-io/k3s/pull/11021) +* Launch private registry with init [(#11046)](https://github.com/k3s-io/k3s/pull/11046) +* Backports for 2024-10 [(#11062)](https://github.com/k3s-io/k3s/pull/11062) +* Allow additional Rootless CopyUpDirs through K3S_ROOTLESS_COPYUPDIRS [(#11043)](https://github.com/k3s-io/k3s/pull/11043) +* Bump containerd to v1.7.22 [(#11074)](https://github.com/k3s-io/k3s/pull/11074) +* Simplify svclb ds [(#11084)](https://github.com/k3s-io/k3s/pull/11084) +* Add the nvidia runtime cdi [(#11094)](https://github.com/k3s-io/k3s/pull/11094) +* Revert "Make svclb as simple as possible" [(#11114)](https://github.com/k3s-io/k3s/pull/11114) +* Fixes "file exists" error from CNI bins when upgrading k3s [(#11127)](https://github.com/k3s-io/k3s/pull/11127) +* Update to Kubernetes v1.29.10-k3s1 and Go 1.22.8 [(#11160)](https://github.com/k3s-io/k3s/pull/11160) + +----- +## Release [v1.29.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.9+k3s1) + + +This release updates Kubernetes to v1.29.9, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1298). + +### Changes since v1.29.8+k3s1: + +* Update CNI plugins version [(#10819)](https://github.com/k3s-io/k3s/pull/10819) +* Backports for 2024-09 [(#10844)](https://github.com/k3s-io/k3s/pull/10844) +* Testing And Secrets-Encryption Backports for 2024-09 [(#10803)](https://github.com/k3s-io/k3s/pull/10803) + * Update to newer OS images for install testing + * Fix caching name for e2e vagrant box + * Fix deploy latest commit on E2E tests + * Remove secrets encryption controller #10612 + * DRY E2E Upgrade test setup + * Cover edge case when on new minor release for E2E upgrade test +* Fix hosts.toml header var [(#10873)](https://github.com/k3s-io/k3s/pull/10873) +* Update to v1.29.9-k3s1 and Go 1.22.6 [(#10885)](https://github.com/k3s-io/k3s/pull/10885) +* Update Kubernetes to v1.29.9-k3s2 [(#10908)](https://github.com/k3s-io/k3s/pull/10908) + +----- +## Release [v1.29.8+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.8+k3s1) + + +This release updates Kubernetes to v1.29.8, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1297). + +### Changes since v1.29.7+k3s1: + +* Fixing setproctitle function [(#10623)](https://github.com/k3s-io/k3s/pull/10623) +* Bump docker/docker to v25.0.6 [(#10650)](https://github.com/k3s-io/k3s/pull/10650) +* Backports for 2024-08 release cycle [(#10665)](https://github.com/k3s-io/k3s/pull/10665) + * Use pagination when listing large numbers of resources + * Fix multiple issues with servicelb + * Remove deprecated use of wait. 
functions + * Wire lasso metrics up to metrics endpoint +* Backports for August 2024 [(#10672)](https://github.com/k3s-io/k3s/pull/10672) +* Bump containerd to v1.7.20 [(#10661)](https://github.com/k3s-io/k3s/pull/10661) +* Add tolerations support for DaemonSet pods [(#10704)](https://github.com/k3s-io/k3s/pull/10704) + * **New Feature**: Users can now define Kubernetes tolerations for ServiceLB DaemonSet directly in the `svccontroller.k3s.cattle.io/tolerations` annotation on services. +* Update to v1.29.8-k3s1 and Go 1.22.5 [(#10720)](https://github.com/k3s-io/k3s/pull/10720) + +----- +## Release [v1.29.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.7+k3s1) + + +This release updates Kubernetes to v1.29.7, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1296). + +### Changes since v1.29.6+k3s2: + +* Backports for 2024-07 release cycle [(#10498)](https://github.com/k3s-io/k3s/pull/10498) + * Bump k3s-root to v0.14.0 + * Bump github.com/hashicorp/go-retryablehttp from 0.7.4 to 0.7.7 + * Bump Local Path Provisioner version + * Ensure remotedialer kubelet connections use kubelet bind address + * Chore: Bump Trivy version + * Add etcd s3 config secret implementation +* July Test Backports [(#10508)](https://github.com/k3s-io/k3s/pull/10508) +* Update to v1.29.7-k3s1 and Go 1.22.5 [(#10539)](https://github.com/k3s-io/k3s/pull/10539) +* Fix issues loading data-dir value from env vars or dropping config files [(#10597)](https://github.com/k3s-io/k3s/pull/10597) + +----- +## Release [v1.29.6+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.29.6+k3s2) + + +This release updates Kubernetes to v1.29.6, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1296). + +### Changes since v1.29.6+k3s1: + +* Update flannel to v0.25.4 and fixed issue with IPv6 mask [(#10427)](https://github.com/k3s-io/k3s/pull/10427) + +----- +## Release [v1.29.6+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.6+k3s1) + + +This release updates Kubernetes to v1.29.6, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1295). + +### Changes since v1.29.5+k3s1: + +* Fix bug when using tailscale config by file [(#10142)](https://github.com/k3s-io/k3s/pull/10142) +* Bump flannel version to v0.25.2 [(#10220)](https://github.com/k3s-io/k3s/pull/10220) +* Update kube-router version to v2.1.2 [(#10181)](https://github.com/k3s-io/k3s/pull/10181) +* Improve tailscale test & add extra log in e2e tests [(#10212)](https://github.com/k3s-io/k3s/pull/10212) +* Backports for 2024-06 release cycle [(#10249)](https://github.com/k3s-io/k3s/pull/10249) + * Add WithSkipMissing to not fail import on missing blobs + * Use fixed stream server bind address for cri-dockerd + * Switch stargz over to cri registry config_path + * Bump to containerd v1.7.17, etcd v3.5.13 + * Bump spegel version + * Fix issue with externalTrafficPolicy: Local for single-stack services on dual-stack nodes + * ServiceLB now sets the priorityClassName on svclb pods to `system-node-critical` by default. This can be overridden on a per-service basis via the `svccontroller.k3s.cattle.io/priorityclassname` annotation. 
+ * Bump minio-go to v7.0.70
+ * Bump kine to v0.11.9 to fix pagination
+ * Update valid resolv conf
+ * Add missing kernel config check
+ * Symlinked sub-directories are now respected when scanning Auto-Deploying Manifests (AddOns)
+ * Fix bug: allow helm controller set owner reference
+ * Bump klipper-helm image for tls secret support
+ * Fix issue with k3s-etcd informers not starting
+ * `--enable-pprof` can now be set on agents to enable the debug/pprof endpoints. When set, agents will listen on the supervisor port.
+ * `--supervisor-metrics` can now be set on servers to enable serving internal metrics on the supervisor endpoint; when set, agents will listen on the supervisor port.
+ * Fix netpol crash when node remains tainted uninitialized
+ * The embedded load-balancer will now fall back to trying all servers with health-checks ignored, if all servers have been marked unavailable due to failed health checks.
+* More backports for 2024-06 release cycle [(#10288)](https://github.com/k3s-io/k3s/pull/10288)
+* Add snapshot retention etcd-s3-folder fix [(#10316)](https://github.com/k3s-io/k3s/pull/10316)
+* Add test for `isValidResolvConf` (#10302) [(#10329)](https://github.com/k3s-io/k3s/pull/10329)
+* Fix race condition panic in loadbalancer.nextServer [(#10322)](https://github.com/k3s-io/k3s/pull/10322)
+* Fix typo, use `rancher/permissions` [(#10298)](https://github.com/k3s-io/k3s/pull/10298)
+* Expand GHA go caching to include newest release branch [(#10334)](https://github.com/k3s-io/k3s/pull/10334)
+* Update Kubernetes to v1.29.6 [(#10348)](https://github.com/k3s-io/k3s/pull/10348)
+* Fix agent supervisor port using apiserver port instead [(#10354)](https://github.com/k3s-io/k3s/pull/10354)
+* Fix issue that allowed multiple simultaneous snapshots to be allowed [(#10376)](https://github.com/k3s-io/k3s/pull/10376)
+
+-----
+## Release [v1.29.5+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.5+k3s1)
+
+
+This release updates Kubernetes to v1.29.5, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1294).
+
+### Changes since v1.29.4+k3s1:
+
+* Update stable channel to v1.29.4+k3s1 [(#10031)](https://github.com/k3s-io/k3s/pull/10031)
+* Add E2E Split Server to Drone, support parallel testing in Drone [(#9940)](https://github.com/k3s-io/k3s/pull/9940)
+* Bump E2E opensuse leap to 15.6, fix btrfs test [(#10057)](https://github.com/k3s-io/k3s/pull/10057)
+* Replace deprecated ruby function [(#10091)](https://github.com/k3s-io/k3s/pull/10091)
+* Set correct release channel for e2e upgrade test [(#10106)](https://github.com/k3s-io/k3s/pull/10106)
+* Windows changes [(#10115)](https://github.com/k3s-io/k3s/pull/10115)
+* Update to v1.29.5-k3s1 and Go 1.21.9 [(#10108)](https://github.com/k3s-io/k3s/pull/10108)
+
+-----
+## Release [v1.29.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.4+k3s1)
+
+
+This release updates Kubernetes to v1.29.4, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1293).
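+
+One item in the changes below reworks the `k3s etcd-snapshot` command into a client of the server process. As an illustrative sketch only (the subcommand names follow the note below; anything else shown is an assumption, not documented behavior):
+
+```bash
+# Take an on-demand snapshot; the server process performs the work and reports the result.
+k3s etcd-snapshot save
+
+# List the snapshots the server knows about, then delete one by name.
+k3s etcd-snapshot list
+k3s etcd-snapshot delete <snapshot-name>
+```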
+ +### Changes since v1.29.3+k3s1: + +* Send error response if member list cannot be retrieved [(#9722)](https://github.com/k3s-io/k3s/pull/9722) +* Respect cloud-provider fields set by kubelet [(#9721)](https://github.com/k3s-io/k3s/pull/9721) + * The k3s stub cloud provider now respects the kubelet's requested provider-id, instance type, and topology labels +* Fix error when image has already been pulled [(#9770)](https://github.com/k3s-io/k3s/pull/9770) +* Add a new error when kine is with disable apiserver or disable etcd [(#9766)](https://github.com/k3s-io/k3s/pull/9766) +* Bump k3s-root to v0.13.0 [(#9718)](https://github.com/k3s-io/k3s/pull/9718) +* Use ubuntu latest for better golang caching keys [(#9711)](https://github.com/k3s-io/k3s/pull/9711) +* Bump Trivy version [(#9780)](https://github.com/k3s-io/k3s/pull/9780) +* Move to ubuntu 23.10 for E2E tests [(#9755)](https://github.com/k3s-io/k3s/pull/9755) +* Update channel server [(#9808)](https://github.com/k3s-io/k3s/pull/9808) +* Add /etc/passwd and /etc/group to k3s docker image [(#9784)](https://github.com/k3s-io/k3s/pull/9784) +* Fix etcd snapshot reconcile for agentless servers [(#9809)](https://github.com/k3s-io/k3s/pull/9809) +* Add health-check support to loadbalancer [(#9757)](https://github.com/k3s-io/k3s/pull/9757) +* Add tls for kine [(#9572)](https://github.com/k3s-io/k3s/pull/9572) + * Kine is now able to use TLS +* Transition from deprecated pointer library to ptr [(#9801)](https://github.com/k3s-io/k3s/pull/9801) +* Remove old pinned dependencies [(#9806)](https://github.com/k3s-io/k3s/pull/9806) +* Several E2E Matrix improvements [(#9802)](https://github.com/k3s-io/k3s/pull/9802) +* Add certificate expiry check, events, and metrics [(#9772)](https://github.com/k3s-io/k3s/pull/9772) +* Add updatecli policy to update k3s-root [(#9844)](https://github.com/k3s-io/k3s/pull/9844) +* Bump Trivy version [(#9840)](https://github.com/k3s-io/k3s/pull/9840) +* Add workaround for containerd hosts.toml bug when passing config for default registry endpoint [(#9853)](https://github.com/k3s-io/k3s/pull/9853) +* Fix: agent volume in example docker compose [(#9838)](https://github.com/k3s-io/k3s/pull/9838) +* Bump spegel to v0.0.20-k3s1 [(#9863)](https://github.com/k3s-io/k3s/pull/9863) +* Add supervisor cert/key to rotate list [(#9832)](https://github.com/k3s-io/k3s/pull/9832) +* Add quotes to avoid useless updatecli updates [(#9877)](https://github.com/k3s-io/k3s/pull/9877) +* Bump containerd and cri-dockerd [(#9886)](https://github.com/k3s-io/k3s/pull/9886) + * The embedded containerd has been bumped to v1.7.15 + * The embedded cri-dockerd has been bumped to v0.3.12 +* Move etcd snapshot management CLI to request/response [(#9816)](https://github.com/k3s-io/k3s/pull/9816) + * The `k3s etcd-snapshot` command has been reworked for improved consistency. All snapshots operations are now performed by the server process, with the CLI acting as a client to initiate and report results. As a side effect, the CLI is now less noisy when managing snapshots. +* Improve etcd load-balancer startup behavior [(#9883)](https://github.com/k3s-io/k3s/pull/9883) +* Actually fix agent certificate rotation [(#9902)](https://github.com/k3s-io/k3s/pull/9902) +* Bump latest to v1.29.3+k3s1 [(#9909)](https://github.com/k3s-io/k3s/pull/9909) +* Update packaged manifests [(#9920)](https://github.com/k3s-io/k3s/pull/9920) + * Traefik has been bumped to v2.10.7. + * Traefik pod annotations are now set properly in the default chart values. 
+ * The system-default-registry value now supports RFC2732 IPv6 literals. + * The local-path provisioner now defaults to creating `local` volumes, instead of `hostPath`. +* Allow Local path provisioner to read helper logs [(#9835)](https://github.com/k3s-io/k3s/pull/9835) +* Update kube-router to v2.1.0 [(#9926)](https://github.com/k3s-io/k3s/pull/9926) +* Match setup-go caching key in GitHub Actions [(#9890)](https://github.com/k3s-io/k3s/pull/9890) +* Add startup testlet on preloaded images [(#9941)](https://github.com/k3s-io/k3s/pull/9941) +* Update to v1.29.4-k3s1 and Go 1.21.9 [(#9960)](https://github.com/k3s-io/k3s/pull/9960) +* Fix on-demand snapshots timing out; not honoring folder [(#9984)](https://github.com/k3s-io/k3s/pull/9984) +* Make `/db/info` available anonymously from localhost [(#10001)](https://github.com/k3s-io/k3s/pull/10001) + +----- +## Release [v1.29.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.3+k3s1) + + +This release updates Kubernetes to v1.29.3, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1292). + +### Changes since v1.29.2+k3s1: + +* Testing ADR [(#9562)](https://github.com/k3s-io/k3s/pull/9562) +* Unit Testing Matrix and Actions bump [(#9479)](https://github.com/k3s-io/k3s/pull/9479) +* Update install test OS matrix [(#9480)](https://github.com/k3s-io/k3s/pull/9480) +* Update klipper-lb image version [(#9488)](https://github.com/k3s-io/k3s/pull/9488) +* Add an integration test for flannel-backend=none [(#9582)](https://github.com/k3s-io/k3s/pull/9582) +* Better GitHub CI caching strategy for golang [(#9495)](https://github.com/k3s-io/k3s/pull/9495) +* Correct formatting of GH PR sha256sum artifact [(#9472)](https://github.com/k3s-io/k3s/pull/9472) +* Rootless mode also bind service nodePort to host for LoadBalancer type [(#9512)](https://github.com/k3s-io/k3s/pull/9512) + * Rootless mode should also bind service nodePort to host for LoadBalancer type, matching UX of rootful mode. +* Fix coredns NodeHosts on dual-stack clusters [(#9584)](https://github.com/k3s-io/k3s/pull/9584) +* Tweak netpol node wait logs [(#9581)](https://github.com/k3s-io/k3s/pull/9581) +* Fix issue with etcd node name missing hostname [(#9522)](https://github.com/k3s-io/k3s/pull/9522) +* Bump helm-controller/klipper-helm versions [(#9595)](https://github.com/k3s-io/k3s/pull/9595) +* Update stable channel to v1.28.7+k3s1 [(#9615)](https://github.com/k3s-io/k3s/pull/9615) +* Reenable Install and Snapshotter Testing [(#9601)](https://github.com/k3s-io/k3s/pull/9601) +* Move docker tests into tests folder [(#9555)](https://github.com/k3s-io/k3s/pull/9555) +* Fix setup-go typo [(#9634)](https://github.com/k3s-io/k3s/pull/9634) +* Fix additional corner cases in registries handling [(#9556)](https://github.com/k3s-io/k3s/pull/9556) +* Fix snapshot prune [(#9502)](https://github.com/k3s-io/k3s/pull/9502) +* Use and version flannel/cni-plugin properly [(#9635)](https://github.com/k3s-io/k3s/pull/9635) + * The embedded flannel cni-plugin binary is now built and versioned separate from the rest of the cni plugins and the embedded flannel controller. 
+* Bump spegel [(#9599)](https://github.com/k3s-io/k3s/pull/9599) + * Bump spegel to v0.0.18-k3s3 + * Adds wildcard registry support + * Fixes issue with excessive CPU utilization while waiting for containerd to start + * Add env var to allow spegel mirroring of latest tag +* Chore(deps): Remediating CVEs found by trivy; CVE-2023-45142 on otelrestful and CVE-2023-48795 on golang.org/x/crypto [(#9513)](https://github.com/k3s-io/k3s/pull/9513) +* Fix: use correct wasm shims names [(#9519)](https://github.com/k3s-io/k3s/pull/9519) +* Fix wildcard with embedded registry test [(#9649)](https://github.com/k3s-io/k3s/pull/9649) +* Disable color outputs using `NO_COLOR` env var [(#9357)](https://github.com/k3s-io/k3s/pull/9357) + * To enable raw output for the `check-config` subcommand, you may now set NO_COLOR=1 +* Improve tailscale e2e test [(#9586)](https://github.com/k3s-io/k3s/pull/9586) +* Adjust first node-ip based on configured clusterCIDR [(#9520)](https://github.com/k3s-io/k3s/pull/9520) +* Bump Trivy version [(#9528)](https://github.com/k3s-io/k3s/pull/9528) +* Include flannel version in flannel cni plugin version [(#9648)](https://github.com/k3s-io/k3s/pull/9648) + * The flannel controller version is now reported as build metadata on the flannel cni plugin version. +* Enable E2E tests on GitHub Actions [(#9660)](https://github.com/k3s-io/k3s/pull/9660) +* Bump metrics-server to v0.7.0 [(#9673)](https://github.com/k3s-io/k3s/pull/9673) +* Bump upload and download actions to v4 [(#9666)](https://github.com/k3s-io/k3s/pull/9666) +* Warn and suppress duplicate registry mirror endpoints [(#9697)](https://github.com/k3s-io/k3s/pull/9697) + * K3s will now warn and suppress duplicate entries in the mirror endpoint list for a registry. Containerd does not support listing the same endpoint multiple times as a mirror for a single upstream registry. +* Remove repetitive words [(#9671)](https://github.com/k3s-io/k3s/pull/9671) +* Run Subset of Docker tests in GitHub Actions [(#9698)](https://github.com/k3s-io/k3s/pull/9698) +* Fix wildcard entry upstream fallback [(#9729)](https://github.com/k3s-io/k3s/pull/9729) +* Update to v1.29.3-k3s1 and Go 1.21.8 [(#9747)](https://github.com/k3s-io/k3s/pull/9747) + +----- +## Release [v1.29.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.2+k3s1) + + +This release updates Kubernetes to v1.29.2, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1291). 
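+
+Among the changes below, the install script gains an `INSTALL_K3S_PR` option for installing a CI-built artifact from an open pull request. A hedged sketch of the expected usage (the PR number is a placeholder; requirements such as CI approval on the PR follow the note below):
+
+```bash
+# Install a K3s build produced by CI for an open, CI-approved pull request.
+curl -sfL https://get.k3s.io | INSTALL_K3S_PR=9185 sh -
+```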
+ +### Changes since v1.29.1+k3s2: + +* Bump Local Path Provisioner version [(#8953)](https://github.com/k3s-io/k3s/pull/8953) +* Add ability to install K3s PR Artifact from GitHub [(#9185)](https://github.com/k3s-io/k3s/pull/9185) + * Adds `INSTALL_K3S_PR` option to install a build of K3s from any open PR with CI approval +* Bump Trivy version [(#9237)](https://github.com/k3s-io/k3s/pull/9237) +* Bump codecov/codecov-action from 3 to 4 [(#9353)](https://github.com/k3s-io/k3s/pull/9353) +* Update stable channel [(#9388)](https://github.com/k3s-io/k3s/pull/9388) +* Fix snapshot reconcile retry [(#9318)](https://github.com/k3s-io/k3s/pull/9318) +* Add check for etcd-snapshot-dir and fix panic in Walk [(#9317)](https://github.com/k3s-io/k3s/pull/9317) +* Bump CNI plugins to v1.4.0 [(#9249)](https://github.com/k3s-io/k3s/pull/9249) +* Fix issue with coredns node hosts controller [(#9354)](https://github.com/k3s-io/k3s/pull/9354) + * Fixed issue that could cause coredns pods to fail to start when the embedded helm controller is disabled, due to the configmap not being updated with node hosts entries. +* Fix on-demand snapshots on ipv6-only nodes [(#9247)](https://github.com/k3s-io/k3s/pull/9247) +* Bump flannel version [(#9395)](https://github.com/k3s-io/k3s/pull/9395) + * Bumped flannel to v0.24.2 +* Build: Align drone base images [(#8959)](https://github.com/k3s-io/k3s/pull/8959) +* Changed how lastHeartBeatTime works in the etcd condition [(#9263)](https://github.com/k3s-io/k3s/pull/9263) +* Runtimes refactor using exec.LookPath [(#9311)](https://github.com/k3s-io/k3s/pull/9311) + * Directories containing runtimes need to be included in the $PATH environment variable for effective runtime detection. +* Bump cri-dockerd to fix compat with Docker Engine 25 [(#9290)](https://github.com/k3s-io/k3s/pull/9290) +* Add codcov secret for integration tests on Push [(#9422)](https://github.com/k3s-io/k3s/pull/9422) +* Allow executors to define `containerd` and `cridockerd` behavior [(#9184)](https://github.com/k3s-io/k3s/pull/9184) +* Update Kube-router to v2.0.1 [(#9396)](https://github.com/k3s-io/k3s/pull/9396) +* : Test_UnitApplyContainerdQoSClassConfigFileIfPresent (Created) [(#8945)](https://github.com/k3s-io/k3s/pull/8945) +* Readd `k3s secrets-encrypt rotate-keys` with correct support for KMSv2 GA [(#9340)](https://github.com/k3s-io/k3s/pull/9340) +* Fix iptables check when sbin isn't in user PATH [(#9344)](https://github.com/k3s-io/k3s/pull/9344) +* Don't create NodePasswordValidationFailed event if agent is disabled [(#9312)](https://github.com/k3s-io/k3s/pull/9312) + * The `NodePasswordValidationFailed` Events will no longer be emitted, if the agent is disabled. +* Expose rootless state dir under ~/.rancher/k3s/rootless [(#9308)](https://github.com/k3s-io/k3s/pull/9308) + * When running k3s in rootless mode, expose rootlesskit's state directory as `~/.rancher/k3s/rootless` +* Expose rootless containerd socket directories for external access [(#9309)](https://github.com/k3s-io/k3s/pull/9309) + * Mount k3s rootless containerd & cri-dockerd socket directories to `$XDG_RUNTIME_DIR/k3s/containerd` and `$XDG_RUNTIME_DIR/k3s/cri-dockerd` respectively. 
+* Bump kine and set NotifyInterval to what the apiserver expects [(#9349)](https://github.com/k3s-io/k3s/pull/9349) +* Update Kubernetes to v1.29.2 [(#9493)](https://github.com/k3s-io/k3s/pull/9493) +* Fix drone publish for arm [(#9503)](https://github.com/k3s-io/k3s/pull/9503) +* Remove failing Drone step [(#9517)](https://github.com/k3s-io/k3s/pull/9517) +* Restore original order of agent startup functions [(#9539)](https://github.com/k3s-io/k3s/pull/9539) +* Fix netpol startup when flannel is disabled [(#9571)](https://github.com/k3s-io/k3s/pull/9571) + +----- +## Release [v1.29.1+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.29.1+k3s2) + + +This release updates Kubernetes to v1.29.1, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1290). + +**Important Notes** + +Addresses the runc CVE: [CVE-2024-21626](https://nvd.nist.gov/vuln/detail/CVE-2024-21626) by updating runc to v1.1.12. + +### Changes since v1.29.0+k3s1: + +* Bump Sonobuoy version [(#8910)](https://github.com/k3s-io/k3s/pull/8910) +* Bump actions/setup-go from 4 to 5 [(#9036)](https://github.com/k3s-io/k3s/pull/9036) +* Chore: Update Code of Conduct to Redirect to CNCF CoC [(#9104)](https://github.com/k3s-io/k3s/pull/9104) + * NONE +* Update stable channel to v1.28.5+k3s1 and add v1.29 channel [(#9110)](https://github.com/k3s-io/k3s/pull/9110) +* Added support for env *_PROXY variables for agent loadbalancer [(#9070)](https://github.com/k3s-io/k3s/pull/9070) + * HTTP_PROXY, HTTPS_PROXY and NO_PROXY environment variables are now taken into account by the agent loadbalancer if K3S_AGENT_HTTP_PROXY_ALLOWED env variable is set to true. + * This however doesn't affect local requests as the function used prevents that: https://pkg.go.dev/net/http#ProxyFromEnvironment. 
+* Add a retry around updating a secrets-encrypt node annotations [(#9039)](https://github.com/k3s-io/k3s/pull/9039) +* Silence SELinux warning on INSTALL_K3S_SKIP_SELINUX_RPM [(#8703)](https://github.com/k3s-io/k3s/pull/8703) +* Add ServiceLB support for PodHostIPs FeatureGate [(#8917)](https://github.com/k3s-io/k3s/pull/8917) +* Added support for env *_PROXY variables for agent loadbalancer [(#9118)](https://github.com/k3s-io/k3s/pull/9118) +* Redirect error stream to null when checking nm-cloud systemd unit [(#8815)](https://github.com/k3s-io/k3s/pull/8815) + * Remove confusing "nm-cloud-setup.service: No such file or directory" journalctl log +* Dockerfile.dapper: set $HOME properly [(#9090)](https://github.com/k3s-io/k3s/pull/9090) +* Add system-agent-installer-k3s step to GA release instructions [(#9153)](https://github.com/k3s-io/k3s/pull/9153) +* Fix install script checksum [(#9159)](https://github.com/k3s-io/k3s/pull/9159) +* Fix the OTHER etcd snapshot s3 log message that prints the wrong variable [(#8944)](https://github.com/k3s-io/k3s/pull/8944) +* Handle logging flags when parsing kube-proxy args [(#8916)](https://github.com/k3s-io/k3s/pull/8916) +* Fix nil map in full snapshot configmap reconcile [(#9049)](https://github.com/k3s-io/k3s/pull/9049) +* Add support for containerd cri registry config_path [(#8973)](https://github.com/k3s-io/k3s/pull/8973) +* Add more paths to crun runtime detection [(#9086)](https://github.com/k3s-io/k3s/pull/9086) +* Add runtime checking of golang version [(#9054)](https://github.com/k3s-io/k3s/pull/9054) +* Fix OS PRETTY_NAME on tagged releases [(#9062)](https://github.com/k3s-io/k3s/pull/9062) +* Print error when downloading file error inside install script [(#6874)](https://github.com/k3s-io/k3s/pull/6874) +* Wait for cloud-provider taint to be gone before starting the netpol controller [(#9076)](https://github.com/k3s-io/k3s/pull/9076) +* Bump Trivy version [(#8812)](https://github.com/k3s-io/k3s/pull/8812) +* Use `ipFamilyPolicy: RequireDualStack` for dual-stack kube-dns [(#8984)](https://github.com/k3s-io/k3s/pull/8984) +* Handle etcd status condition when node is not ready and disable etcd [(#9084)](https://github.com/k3s-io/k3s/pull/9084) +* Update s3 e2e test [(#9025)](https://github.com/k3s-io/k3s/pull/9025) +* Add e2e startup test for rootless k3s [(#8383)](https://github.com/k3s-io/k3s/pull/8383) +* Add spegel distributed registry mirror [(#8977)](https://github.com/k3s-io/k3s/pull/8977) +* Bump quic-go for CVE-2023-49295 [(#9208)](https://github.com/k3s-io/k3s/pull/9208) +* Enable network policy controller metrics [(#9195)](https://github.com/k3s-io/k3s/pull/9195) + * Kube-router network policy controller metrics are now exposed via the default node metrics endpoint +* Fix nonexistent dependency repositories [(#9213)](https://github.com/k3s-io/k3s/pull/9213) +* Move proxy dialer out of init() and fix crash when using `K3S_AGENT_HTTP_PROXY_ALLOWED=true` [(#9219)](https://github.com/k3s-io/k3s/pull/9219) +* Error getting node in setEtcdStatusCondition [(#9210)](https://github.com/k3s-io/k3s/pull/9210) +* Update to v1.29.1 and Go 1.21.6 [(#9259)](https://github.com/k3s-io/k3s/pull/9259) +* New stale action [(#9278)](https://github.com/k3s-io/k3s/pull/9278) +* Fix handling of bare hostname or IP as endpoint address in registries.yaml [(#9323)](https://github.com/k3s-io/k3s/pull/9323) +* Bump runc to v1.1.12 and helm-controller to v0.15.7 [(#9332)](https://github.com/k3s-io/k3s/pull/9332) +* Bump helm-controller to fix issue with 
ChartContent [(#9345)](https://github.com/k3s-io/k3s/pull/9345)
+
+-----
+## Release [v1.29.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.29.0+k3s1)
+
+
+This release is K3s's first in the v1.29 line, and updates Kubernetes to v1.29.0.
+
+Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#urgent-upgrade-notes).
+
+:::warning Important
+This release removes the experimental `rotate-keys` subcommand due to changes in Kubernetes upstream for [KMSv2](https://github.com/kubernetes/kubernetes/issues/117728). The subcommand should be added back in a future release.
+:::
+
+:::warning Important
+This release also removes the `multi-cluster-cidr` flag, since support for this alpha feature has been removed completely from [Kubernetes upstream](https://groups.google.com/g/kubernetes-sig-network/c/nts1xEZ--gQ/m/2aTOUNFFAAAJ). This flag should be removed from the configuration before upgrading.
+:::
+
+
+### Changes since v1.28.4+k3s2:
+
+* Fix overlapping address range [(#8913)](https://github.com/k3s-io/k3s/pull/8913)
+* Modify CONTRIBUTING.md guide [(#8954)](https://github.com/k3s-io/k3s/pull/8954)
+* Nov 2023 stable channel update [(#9022)](https://github.com/k3s-io/k3s/pull/9022)
+* Default runtime and runtime classes for wasm/nvidia/crun [(#8936)](https://github.com/k3s-io/k3s/pull/8936)
+ * Added runtime classes for wasm/nvidia/crun
+ * Added default runtime flag for containerd
+* Bump containerd/runc to v1.7.10-k3s1/v1.1.10 [(#8962)](https://github.com/k3s-io/k3s/pull/8962)
+* Allow setting default-runtime on servers [(#9027)](https://github.com/k3s-io/k3s/pull/9027)
+* Bump containerd to v1.7.11 [(#9040)](https://github.com/k3s-io/k3s/pull/9040)
+* Remove GA feature-gates [(#8970)](https://github.com/k3s-io/k3s/pull/8970)
+* Only publish to code_cov on merged E2E builds [(#9051)](https://github.com/k3s-io/k3s/pull/9051)
+* Update Kubernetes to v1.29.0+k3s1 [(#9052)](https://github.com/k3s-io/k3s/pull/9052)
+* Update flannel to v0.24.0 and remove multiclustercidr flag [(#9075)](https://github.com/k3s-io/k3s/pull/9075)
+* Remove rotate-keys subcommand [(#9079)](https://github.com/k3s-io/k3s/pull/9079)
+
+----- diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.30.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.30.X.md new file mode 100644 index 000000000..5d89fb45a --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.30.X.md @@ -0,0 +1,271 @@ +--- +hide_table_of_contents: true +sidebar_position: 3 +--- + +# v1.30.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#urgent-upgrade-notes).
+::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.30.8+k3s1](v1.30.X.md#release-v1308k3s1) | Dec 18 2024| [v1.30.8](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1308) | [v0.13.5](https://github.com/k3s-io/kine/releases/tag/v0.13.5) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.16-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.16-k3s1) | [v1.7.23-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.23-k3s2) | [v1.2.1](https://github.com/opencontainers/runc/releases/tag/v1.2.1) | [v0.25.7](https://github.com/flannel-io/flannel/releases/tag/v0.25.7) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.12.0](https://github.com/coredns/coredns/releases/tag/v1.12.0) | [v0.16.5](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.5) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.30.7+k3s1](v1.30.X.md#release-v1307k3s1) | Dec 04 2024| [v1.30.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1307) | [v0.13.5](https://github.com/k3s-io/kine/releases/tag/v0.13.5) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.16-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.16-k3s1) | [v1.7.23-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.23-k3s2) | [v1.2.1](https://github.com/opencontainers/runc/releases/tag/v1.2.1) | [v0.25.7](https://github.com/flannel-io/flannel/releases/tag/v0.25.7) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.16.5](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.5) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.30.6+k3s1](v1.30.X.md#release-v1306k3s1) | Oct 26 2024| [v1.30.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1306) | [v0.13.2](https://github.com/k3s-io/kine/releases/tag/v0.13.2) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.22-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.22-k3s1) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.16.5](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.5) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.30.5+k3s1](v1.30.X.md#release-v1305k3s1) | Sep 19 2024| [v1.30.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1305) | [v0.12.0](https://github.com/k3s-io/kine/releases/tag/v0.12.0) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | 
[v1.7.21-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.21-k3s2) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.8](https://github.com/traefik/traefik/releases/tag/v2.11.8) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.16.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.4) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.30.4+k3s1](v1.30.X.md#release-v1304k3s1) | Aug 21 2024| [v1.30.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1304) | [v0.11.11](https://github.com/k3s-io/kine/releases/tag/v0.11.11) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.20-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.20-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.16.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.1) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.30.3+k3s1](v1.30.X.md#release-v1303k3s1) | Jul 31 2024| [v1.30.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1303) | [v0.11.11](https://github.com/k3s-io/kine/releases/tag/v0.11.11) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.16.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.1) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.30.2+k3s2](v1.30.X.md#release-v1302k3s2) | Jul 03 2024| [v1.30.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1302) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.16.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.1) | [v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) | +| [v1.30.2+k3s1](v1.30.X.md#release-v1302k3s1) | Jun 
25 2024| [v1.30.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1302) | [v0.11.9](https://github.com/k3s-io/kine/releases/tag/v0.11.9) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.17-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.17-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.2](https://github.com/flannel-io/flannel/releases/tag/v0.25.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.16.1](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.1) | [v0.0.27](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.27) |
+| [v1.30.1+k3s1](v1.30.X.md#release-v1301k3s1) | May 22 2024| [v1.30.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1301) | [v0.11.8-0.20240430184817-f9ce6f8da97b](https://github.com/k3s-io/kine/releases/tag/v0.11.8-0.20240430184817-f9ce6f8da97b) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.16.1-0.20240502205943-2f32059d43e6](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.1-0.20240502205943-2f32059d43e6) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) |
+| [v1.30.0+k3s1](v1.30.X.md#release-v1300k3s1) | May 10 2024| [v1.30.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1300) | [v0.11.7](https://github.com/k3s-io/kine/releases/tag/v0.11.7) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.9-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.9-k3s1) | [v1.7.15-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.15-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.24.2](https://github.com/flannel-io/flannel/releases/tag/v0.24.2) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.15.9](https://github.com/k3s-io/helm-controller/releases/tag/v0.15.9) | [v0.0.26](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.26) |
+
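+
+To confirm which of the component versions above a node is actually running, the binary and the Node object can be inspected directly (illustrative commands; output formats vary between releases):
+
+```bash
+# K3s version, plus the Go version it was built with.
+k3s --version
+
+# Kubelet and container runtime versions as reported by each node.
+kubectl get nodes -o custom-columns=NAME:.metadata.name,KUBELET:.status.nodeInfo.kubeletVersion,RUNTIME:.status.nodeInfo.containerRuntimeVersion
+```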
+ +## Release [v1.30.8+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.8+k3s1) + + +This release updates Kubernetes to v1.30.8, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1307). + +### Changes since v1.30.7+k3s1: + +* Fix secrets-encrypt reencrypt timeout error [(#11441)](https://github.com/k3s-io/k3s/pull/11441) +* Remove experimental from embedded-registry flag [(#11445)](https://github.com/k3s-io/k3s/pull/11445) +* Update coredns to 1.12.0 [(#11455)](https://github.com/k3s-io/k3s/pull/11455) +* Rework loadbalancer server selection logic [(#11458)](https://github.com/k3s-io/k3s/pull/11458) + * The embedded client loadbalancer that handles connectivity to control-plane elements has been extensively reworked for improved performance, reliability, and observability. +* Add node-internal-dns/node-external-dns address pass-through support … [(#11465)](https://github.com/k3s-io/k3s/pull/11465) +* Update to v1.30.8-k3s1 and Go 1.22.9 [(#11461)](https://github.com/k3s-io/k3s/pull/11461) + +----- +## Release [v1.30.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.7+k3s1) + + +This release updates Kubernetes to v1.30.7, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1306). + +### Changes since v1.30.6+k3s1: + +* Backport E2E GHA fixes [(#11227)](https://github.com/k3s-io/k3s/pull/11227) +* Backports for 2024-11 [(#11262)](https://github.com/k3s-io/k3s/pull/11262) +* Update flannel and base cni plugins version [(#11248)](https://github.com/k3s-io/k3s/pull/11248) +* Bump to latest k3s-root version in scripts/version.sh [(#11299)](https://github.com/k3s-io/k3s/pull/11299) +* More backports for 2024-11 [(#11308)](https://github.com/k3s-io/k3s/pull/11308) +* Fix issue with loadbalancer failover to default server [(#11325)](https://github.com/k3s-io/k3s/pull/11325) +* Update Kubernetes to v1.30.7-k3s1 [(#11371)](https://github.com/k3s-io/k3s/pull/11371) +* Bump containerd to -k3s2 to fix rewrites [(#11404)](https://github.com/k3s-io/k3s/pull/11404) + +----- +## Release [v1.30.6+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.6+k3s1) + + +This release updates Kubernetes to v1.30.6, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1305). 
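+
+One change below allows extra copy-up directories for rootless mode via the `K3S_ROOTLESS_COPYUPDIRS` environment variable. A minimal sketch, assuming a comma-separated list of host paths (the value format is inferred from the feature name, not a documented contract):
+
+```bash
+# Run a rootless server, copying the listed host directories into the rootless mount namespace.
+K3S_ROOTLESS_COPYUPDIRS=/etc/cni,/usr/local/lib k3s server --rootless
+```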
+ +### Changes since v1.30.5+k3s1: + +* Add int test for flannel-ipv6masq [(#10903)](https://github.com/k3s-io/k3s/pull/10903) +* Bump Wharfie to v0.6.7 [(#10975)](https://github.com/k3s-io/k3s/pull/10975) +* Add user path to runtimes search [(#11003)](https://github.com/k3s-io/k3s/pull/11003) +* Add e2e test for advanced fields in services [(#11022)](https://github.com/k3s-io/k3s/pull/11022) +* Launch private registry with init [(#11047)](https://github.com/k3s-io/k3s/pull/11047) +* Backports for 2024-10 [(#11061)](https://github.com/k3s-io/k3s/pull/11061) +* Allow additional Rootless CopyUpDirs through K3S_ROOTLESS_COPYUPDIRS [(#11044)](https://github.com/k3s-io/k3s/pull/11044) +* Bump containerd to v1.7.22 [(#11073)](https://github.com/k3s-io/k3s/pull/11073) +* Simplify svclb ds [(#11083)](https://github.com/k3s-io/k3s/pull/11083) +* Add the nvidia runtime cdi [(#11092)](https://github.com/k3s-io/k3s/pull/11092) +* Revert "Make svclb as simple as possible" [(#11113)](https://github.com/k3s-io/k3s/pull/11113) +* Fixes "file exists" error from CNI bins when upgrading k3s [(#11126)](https://github.com/k3s-io/k3s/pull/11126) +* Update to Kubernetes v1.30.6-k3s1 and Go 1.22.8 [(#11162)](https://github.com/k3s-io/k3s/pull/11162) + +----- +## Release [v1.30.5+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.5+k3s1) + + +This release updates Kubernetes to v1.30.5, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1304). + +### Changes since v1.30.4+k3s1: + +* Testing And Secrets-Encryption Backports for 2024-09 [(#10801)](https://github.com/k3s-io/k3s/pull/10801) + * Update to newer OS images for install testing + * Fix caching name for e2e vagrant box + * Remove secrets encryption controller + * Cover edge case when on new minor release for E2E upgrade test + * Removes deprecated alpha Secrets Encryption metrics (deprecated in 1.30, removed in 1.31) +* Update CNI plugins version [(#10818)](https://github.com/k3s-io/k3s/pull/10818) +* Backports for 2024-09 [(#10843)](https://github.com/k3s-io/k3s/pull/10843) +* Fix hosts.toml header var [(#10872)](https://github.com/k3s-io/k3s/pull/10872) +* Update to v1.30.5-k3s1 and Go 1.22.6 [(#10888)](https://github.com/k3s-io/k3s/pull/10888) +* Update Kubernetes to v1.30.5-k3s2 [(#10909)](https://github.com/k3s-io/k3s/pull/10909) + +----- +## Release [v1.30.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.4+k3s1) + + +This release updates Kubernetes to v1.30.4, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1303). + +### Changes since v1.30.3+k3s1: + +* Bump docker/docker to v25.0.6 [(#10649)](https://github.com/k3s-io/k3s/pull/10649) +* Backports for 2024-08 release cycle [(#10664)](https://github.com/k3s-io/k3s/pull/10664) + * Use pagination when listing large numbers of resources + * Fix multiple issues with servicelb + * Remove deprecated use of wait. 
functions + * Wire lasso metrics up to metrics endpoint +* Backports for August 2024 [(#10671)](https://github.com/k3s-io/k3s/pull/10671) +* Bump containerd to v1.7.20 [(#10660)](https://github.com/k3s-io/k3s/pull/10660) +* Add tolerations support for DaemonSet pods [(#10703)](https://github.com/k3s-io/k3s/pull/10703) + * **New Feature**: Users can now define Kubernetes tolerations for ServiceLB DaemonSet directly in the `svccontroller.k3s.cattle.io/tolerations` annotation on services. +* Update to v1.30.4-k3s1 and Go 1.22.5 [(#10721)](https://github.com/k3s-io/k3s/pull/10721) + +----- +## Release [v1.30.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.3+k3s1) + + +This release updates Kubernetes to v1.30.3, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1302). + +### Changes since v1.30.2+k3s2: + +* Update channel server for k3s2 [(#10446)](https://github.com/k3s-io/k3s/pull/10446) +* Set correct release channel for e2e upgrade test [(#10460)](https://github.com/k3s-io/k3s/pull/10460) +* Backports for 2024-07 release cycle [(#10497)](https://github.com/k3s-io/k3s/pull/10497) + * Bump k3s-root to v0.14.0 + * Bump github.com/hashicorp/go-retryablehttp from 0.7.4 to 0.7.7 + * Bump Local Path Provisioner version + * Ensure remotedialer kubelet connections use kubelet bind address + * Chore: Bump Trivy version + * Add etcd s3 config secret implementation +* July Test Backports [(#10507)](https://github.com/k3s-io/k3s/pull/10507) +* Update to v1.30.3-k3s1 and Go 1.22.5 [(#10536)](https://github.com/k3s-io/k3s/pull/10536) +* Fix issues loading data-dir value from env vars or dropping config files [(#10596)](https://github.com/k3s-io/k3s/pull/10596) + +----- +## Release [v1.30.2+k3s2](https://github.com/k3s-io/k3s/releases/tag/v1.30.2+k3s2) + + +This release updates Kubernetes to v1.30.2, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1302). + +### Changes since v1.30.2+k3s1: + +* Update stable channel to v1.29.6+k3s1 [(#10417)](https://github.com/k3s-io/k3s/pull/10417) +* Update flannel to v0.25.4 and fixed issue with IPv6 mask [(#10422)](https://github.com/k3s-io/k3s/pull/10422) + +----- +## Release [v1.30.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.2+k3s1) + + +This release updates Kubernetes to v1.30.2, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1301). 
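+
+The changes below add a `--write-kubeconfig-group` flag to `k3s server`. A minimal sketch of pairing it with the long-standing `--write-kubeconfig-mode` flag (the group name is a placeholder and must already exist on the host):
+
+```bash
+# Make /etc/rancher/k3s/k3s.yaml readable by members of the "k3s-admins" group
+# instead of leaving it accessible to root only.
+k3s server --write-kubeconfig-mode 0640 --write-kubeconfig-group k3s-admins
+```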
+
+### Changes since v1.30.1+k3s1:
+
+* Fix bug when using tailscale config by file [(#10074)](https://github.com/k3s-io/k3s/pull/10074)
+ * Fix bug when using `vpn-auth-file` in the agent
+* Add WithSkipMissing to not fail import on missing blobs [(#10136)](https://github.com/k3s-io/k3s/pull/10136)
+* Use fixed stream server bind address for cri-dockerd [(#9975)](https://github.com/k3s-io/k3s/pull/9975)
+* Switch stargz over to cri registry config_path [(#9977)](https://github.com/k3s-io/k3s/pull/9977)
+* Bump to containerd v1.7.17, etcd v3.5.13 [(#10123)](https://github.com/k3s-io/k3s/pull/10123)
+* Bump spegel version [(#10118)](https://github.com/k3s-io/k3s/pull/10118)
+* Fix issue installing artifacts from PR builds with multiple runs [(#10122)](https://github.com/k3s-io/k3s/pull/10122)
+* Fix issue with `externalTrafficPolicy: Local` for single-stack services on dual-stack nodes [(#9963)](https://github.com/k3s-io/k3s/pull/9963)
+* Update local-path-provisioner helper script [(#9964)](https://github.com/k3s-io/k3s/pull/9964)
+* Add support for svclb pod PriorityClassName [(#10045)](https://github.com/k3s-io/k3s/pull/10045)
+ * ServiceLB now sets the priorityClassName on svclb pods to `system-node-critical` by default. This can be overridden on a per-service basis via the `svccontroller.k3s.cattle.io/priorityclassname` annotation.
+* Drop check for legacy traefik v1 chart [(#9593)](https://github.com/k3s-io/k3s/pull/9593)
+ * K3s no longer automatically skips deploying traefik v2 if traefik v1 is present. All clusters should have been upgraded to v2 at some point over the last three years.
+* Update kube-router version to v2.1.2 [(#10177)](https://github.com/k3s-io/k3s/pull/10177)
+* Create ADR for branching strategy [(#10147)](https://github.com/k3s-io/k3s/pull/10147)
+* Bump minio-go to v7.0.70 [(#10081)](https://github.com/k3s-io/k3s/pull/10081)
+* Bump kine to v0.11.9 to fix pagination [(#10082)](https://github.com/k3s-io/k3s/pull/10082)
+* Update valid resolv conf [(#9948)](https://github.com/k3s-io/k3s/pull/9948)
+* Add missing kernel config check [(#10100)](https://github.com/k3s-io/k3s/pull/10100)
+* Git workflow file name correction [(#10131)](https://github.com/k3s-io/k3s/pull/10131)
+ * None
+* Follow directory symlinks in auto deploying manifests (#9288) [(#10049)](https://github.com/k3s-io/k3s/pull/10049)
+ * Symlinked sub-directories are now respected when scanning Auto-Deploying Manifests (AddOns)
+* Fix bug: allow helm controller set owner reference [(#10048)](https://github.com/k3s-io/k3s/pull/10048)
+* Fix go.mod [(#10192)](https://github.com/k3s-io/k3s/pull/10192)
+* Bump flannel version to v0.25.2 [(#10146)](https://github.com/k3s-io/k3s/pull/10146)
+* Test: add agent with auth file [(#10119)](https://github.com/k3s-io/k3s/pull/10119)
+ * Fix bug when using `vpn-auth-file` in the agent
+* Add extra log in e2e tests [(#10145)](https://github.com/k3s-io/k3s/pull/10145)
+* Update channel server for may 2024 [(#10137)](https://github.com/k3s-io/k3s/pull/10137)
+* Bump klipper-helm image for tls secret support [(#10187)](https://github.com/k3s-io/k3s/pull/10187)
+* Updating the script binary_size_check to complete the command name by… [(#9992)](https://github.com/k3s-io/k3s/pull/9992)
+* Fix issue with k3s-etcd informers not starting [(#10047)](https://github.com/k3s-io/k3s/pull/10047)
+* Enable serving supervisor metrics [(#10019)](https://github.com/k3s-io/k3s/pull/10019)
+ * `--enable-pprof` can now be set on agents to enable the debug/pprof endpoints. When set, agents will listen on the supervisor port.
+ * `--supervisor-metrics` can now be set on servers to enable serving internal metrics on the supervisor endpoint; when set, agents will listen on the supervisor port.
+* Bump alpine from 3.18 to 3.20 in /conformance [(#10210)](https://github.com/k3s-io/k3s/pull/10210)
+* Bump alpine from 3.18 to 3.20 in /package [(#10211)](https://github.com/k3s-io/k3s/pull/10211)
+* Bump ubuntu from 22.04 to 24.04 in /tests/e2e/scripts [(#10040)](https://github.com/k3s-io/k3s/pull/10040)
+* Bump Trivy version [(#10039)](https://github.com/k3s-io/k3s/pull/10039)
+* Fix netpol crash when node remains tainted uninitialized [(#10073)](https://github.com/k3s-io/k3s/pull/10073)
+* Fix issue caused by sole server marked as failed under load [(#10241)](https://github.com/k3s-io/k3s/pull/10241)
+ * The embedded load-balancer will now fall back to trying all servers with health-checks ignored, if all servers have been marked unavailable due to failed health checks.
+* Add write-kubeconfig-group flag to server [(#9233)](https://github.com/k3s-io/k3s/pull/9233)
+ * New flag in k3s server: --write-kubeconfig-group
+* Fix embedded mirror blocked by SAR RBAC and re-enable test [(#10257)](https://github.com/k3s-io/k3s/pull/10257)
+* Bump Local Path Provisioner version [(#10268)](https://github.com/k3s-io/k3s/pull/10268)
+* Fix: Use actual warningPeriod in certmonitor [(#10271)](https://github.com/k3s-io/k3s/pull/10271)
+* Fix bug that caused agents to bypass local loadbalancer [(#10280)](https://github.com/k3s-io/k3s/pull/10280)
+* Add ADR for support for etcd s3 config secret [(#9364)](https://github.com/k3s-io/k3s/pull/9364)
+* Add test for `isValidResolvConf` [(#10302)](https://github.com/k3s-io/k3s/pull/10302)
+* Add snapshot retention etcd-s3-folder fix [(#10293)](https://github.com/k3s-io/k3s/pull/10293)
+* Expand GHA golang caching to include newest release branch [(#10307)](https://github.com/k3s-io/k3s/pull/10307)
+* Fix race condition panic in loadbalancer.nextServer [(#10318)](https://github.com/k3s-io/k3s/pull/10318)
+* Fix typo, use `rancher/permissions` [(#10296)](https://github.com/k3s-io/k3s/pull/10296)
+* Update Kubernetes to v1.30.2 [(#10349)](https://github.com/k3s-io/k3s/pull/10349)
+* Fix agent supervisor port using apiserver port instead [(#10352)](https://github.com/k3s-io/k3s/pull/10352)
+* Fix issue that allowed multiple simultaneous snapshots to be allowed [(#10372)](https://github.com/k3s-io/k3s/pull/10372)
+
+-----
+## Release [v1.30.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.1+k3s1)
+
+
+This release updates Kubernetes to v1.30.1, and fixes a number of issues.
+
+For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1300).
+
+### Changes since v1.30.0+k3s1:
+
+* Replace deprecated ruby function in e2e tests [(#10084)](https://github.com/k3s-io/k3s/pull/10084)
+* Update channels with 1.30 [(#10097)](https://github.com/k3s-io/k3s/pull/10097)
+* Address 461 [(#10112)](https://github.com/k3s-io/k3s/pull/10112)
+* Update to v1.30.1-k3s1 and Go 1.22.2 [(#10105)](https://github.com/k3s-io/k3s/pull/10105)
+
+-----
+## Release [v1.30.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.30.0+k3s1)
+
+
+
+This release is K3s's first in the v1.30 line, and updates Kubernetes to v1.30.0.
+ +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#changelog-since-v1290). + +### Changes since v1.29.4+k3s1: + +* Kubernetes V1.30.0-k3s1 [(#10063)](https://github.com/k3s-io/k3s/pull/10063) +* Update stable channel to v1.29.4+k3s1 [(#10031)](https://github.com/k3s-io/k3s/pull/10031) +* Add E2E Split Server to Drone, support parallel testing in Drone [(#9940)](https://github.com/k3s-io/k3s/pull/9940) +* Bump E2E opensuse leap to 15.6, fix btrfs test [(#10057)](https://github.com/k3s-io/k3s/pull/10057) +* Remove deprecated `pod-infra-container-image` kubelet flag [(#7409)](https://github.com/k3s-io/k3s/pull/7409) +* Fix e2e tests [(#10061)](https://github.com/k3s-io/k3s/pull/10061) + + +----- diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.31.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.31.X.md new file mode 100644 index 000000000..152a09903 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.31.X.md @@ -0,0 +1,160 @@ +--- +hide_table_of_contents: true +sidebar_position: 2 +--- + +# v1.31.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#urgent-upgrade-notes). +::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.31.4+k3s1](v1.31.X.md#release-v1314k3s1) | Dec 18 2024| [v1.31.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1314) | [v0.13.5](https://github.com/k3s-io/kine/releases/tag/v0.13.5) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.16-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.16-k3s1) | [v1.7.23-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.23-k3s2) | [v1.2.1](https://github.com/opencontainers/runc/releases/tag/v1.2.1) | [v0.25.7](https://github.com/flannel-io/flannel/releases/tag/v0.25.7) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.12.0](https://github.com/coredns/coredns/releases/tag/v1.12.0) | [v0.16.5](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.5) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.31.3+k3s1](v1.31.X.md#release-v1313k3s1) | Dec 04 2024| [v1.31.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1313) | [v0.13.5](https://github.com/k3s-io/kine/releases/tag/v0.13.5) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.16-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.16-k3s1) | [v1.7.23-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.23-k3s2) | [v1.2.1](https://github.com/opencontainers/runc/releases/tag/v1.2.1) | [v0.25.7](https://github.com/flannel-io/flannel/releases/tag/v0.25.7) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | 
[v0.16.5](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.5) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.31.2+k3s1](v1.31.X.md#release-v1312k3s1) | Oct 26 2024| [v1.31.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1312) | [v0.13.2](https://github.com/k3s-io/kine/releases/tag/v0.13.2) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.22-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.22-k3s1) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.16.5](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.5) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | +| [v1.31.1+k3s1](v1.31.X.md#release-v1311k3s1) | Sep 19 2024| [v1.31.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1311) | [v0.12.0](https://github.com/k3s-io/kine/releases/tag/v0.12.0) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.21-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.21-k3s2) | [v1.1.14](https://github.com/opencontainers/runc/releases/tag/v1.1.14) | [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.8](https://github.com/traefik/traefik/releases/tag/v2.11.8) | [v1.11.3](https://github.com/coredns/coredns/releases/tag/v1.11.3) | [v0.16.4](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.4) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | +| [v1.31.0+k3s1](v1.31.X.md#release-v1310k3s1) | Sep 02 2024| [v1.31.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1310) | [v0.12.0](https://github.com/k3s-io/kine/releases/tag/v0.12.0) | [3.44.0](https://sqlite.org/releaselog/3_44_0.html) | [v3.5.13-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.13-k3s1) | [v1.7.20-k3s1](https://github.com/k3s-io/containerd/releases/tag/v1.7.20-k3s1) | [v1.1.12](https://github.com/opencontainers/runc/releases/tag/v1.1.12) | [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4) | [v0.7.0](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.0) | [v2.10.7](https://github.com/traefik/traefik/releases/tag/v2.10.7) | [v1.10.1](https://github.com/coredns/coredns/releases/tag/v1.10.1) | [v0.16.3](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.3) | [v0.0.28](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.28) | + +
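+
+As with the v1.29 and v1.30 lines above, ServiceLB behavior can be tuned per Service by annotation. A hedged example of the `svccontroller.k3s.cattle.io/tolerations` annotation from the v1.29.8/v1.30.4 notes above (the JSON value format shown is an assumption):
+
+```bash
+# Let the svclb DaemonSet pods for this Service tolerate control-plane taints.
+kubectl annotate service my-lb-service \
+  'svccontroller.k3s.cattle.io/tolerations=[{"key":"node-role.kubernetes.io/control-plane","operator":"Exists","effect":"NoSchedule"}]'
+```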
+ +## Release [v1.31.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.31.4+k3s1) + + +This release updates Kubernetes to v1.31.4, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#changelog-since-v1313). + +### Changes since v1.31.3+k3s1: + +* Fix secrets-encrypt reencrypt timeout error [(#11442)](https://github.com/k3s-io/k3s/pull/11442) +* Remove experimental from embedded-registry flag [(#11444)](https://github.com/k3s-io/k3s/pull/11444) +* Rework loadbalancer server selection logic [(#11457)](https://github.com/k3s-io/k3s/pull/11457) + * The embedded client loadbalancer that handles connectivity to control-plane elements has been extensively reworked for improved performance, reliability, and observability. +* Update coredns to 1.12.0 [(#11454)](https://github.com/k3s-io/k3s/pull/11454) +* Add node-internal-dns/node-external-dns address pass-through support … [(#11464)](https://github.com/k3s-io/k3s/pull/11464) +* Update to v1.31.4-k3s1 and Go 1.22.9 [(#11462)](https://github.com/k3s-io/k3s/pull/11462) + +----- +## Release [v1.31.3+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.31.3+k3s1) + + +This release updates Kubernetes to v1.31.3, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#changelog-since-v1312). + +### Changes since v1.31.2+k3s1: + +* Backport E2E GHA fixes [(#11230)](https://github.com/k3s-io/k3s/pull/11230) +* Backports for 2024-11 [(#11261)](https://github.com/k3s-io/k3s/pull/11261) +* Update flannel and base cni plugins version [(#11247)](https://github.com/k3s-io/k3s/pull/11247) +* Bump to latest k3s-root version in scripts/version.sh [(#11302)](https://github.com/k3s-io/k3s/pull/11302) +* More backports for 2024-11 [(#11307)](https://github.com/k3s-io/k3s/pull/11307) +* Fix issue with loadbalancer failover to default server [(#11324)](https://github.com/k3s-io/k3s/pull/11324) +* Update Kubernetes to v1.31.3-k3s1 [(#11372)](https://github.com/k3s-io/k3s/pull/11372) +* Bump containerd to -k3s2 to fix rewrites [(#11403)](https://github.com/k3s-io/k3s/pull/11403) + +----- +## Release [v1.31.2+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.31.2+k3s1) + + +This release updates Kubernetes to v1.31.2, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#changelog-since-v1311). 
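+
+The changes below add CDI support to the bundled NVIDIA runtime handling. As a hedged sketch of consuming a GPU runtime class from a Pod (the `nvidia` RuntimeClass matches what K3s creates when the NVIDIA container runtime is detected; the image is a placeholder):
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: gpu-smoke-test
+spec:
+  runtimeClassName: nvidia
+  containers:
+  - name: cuda
+    image: nvcr.io/nvidia/cuda:12.4.1-base-ubuntu22.04
+    command: ["nvidia-smi"]
+EOF
+```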
+ +### Changes since v1.31.1+k3s1: + +* Add int test for flannel-ipv6masq [(#10904)](https://github.com/k3s-io/k3s/pull/10904) +* Bump Wharfie to v0.6.7 [(#10974)](https://github.com/k3s-io/k3s/pull/10974) +* Add user path to runtimes search [(#11002)](https://github.com/k3s-io/k3s/pull/11002) +* Add e2e test for advanced fields in services [(#11023)](https://github.com/k3s-io/k3s/pull/11023) +* Launch private registry with init [(#11048)](https://github.com/k3s-io/k3s/pull/11048) +* Backports for 2024-10 [(#11054)](https://github.com/k3s-io/k3s/pull/11054) +* Allow additional Rootless CopyUpDirs through K3S_ROOTLESS_COPYUPDIRS [(#11041)](https://github.com/k3s-io/k3s/pull/11041) +* Bump containerd to v1.7.22 [(#11072)](https://github.com/k3s-io/k3s/pull/11072) +* Simplify svclb ds [(#11079)](https://github.com/k3s-io/k3s/pull/11079) +* Add the nvidia runtime cdi [(#11093)](https://github.com/k3s-io/k3s/pull/11093) +* Revert "Make svclb as simple as possible" [(#11118)](https://github.com/k3s-io/k3s/pull/11118) +* Fixes "file exists" error from CNI bins when upgrading k3s [(#11125)](https://github.com/k3s-io/k3s/pull/11125) +* Update Kubernetes to v1.31.2 [(#11155)](https://github.com/k3s-io/k3s/pull/11155) + +----- +## Release [v1.31.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.31.1+k3s1) + + +This release updates Kubernetes to v1.31.1, and fixes a number of issues. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#changelog-since-v1310). + +### Changes since v1.31.0+k3s1: + +* Testing And Secrets-Encryption Backports for 2024-09 [(#10802)](https://github.com/k3s-io/k3s/pull/10802) + * Remove secrets encryption controller + * Cover edge case when on new minor release for E2E upgrade test +* Update CNI plugins version [(#10817)](https://github.com/k3s-io/k3s/pull/10817) +* Backports for 2024-09 [(#10842)](https://github.com/k3s-io/k3s/pull/10842) +* Fix hosts.toml header var [(#10871)](https://github.com/k3s-io/k3s/pull/10871) +* Update Kubernetes to v1.31.1 [(#10895)](https://github.com/k3s-io/k3s/pull/10895) +* Update Kubernetes to v1.31.1-k3s3 [(#10910)](https://github.com/k3s-io/k3s/pull/10910) + +----- +## Release [v1.31.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.31.0+k3s1) + + +This release is K3S's first in the v1.31 line. This release updates Kubernetes to v1.31.0. + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#changelog-since-v1300). + +### Changes since v1.30.4+k3s1: + +* Move test-compat docker test to GHA [(#10414)](https://github.com/k3s-io/k3s/pull/10414) +* Check for bad token permissions when install via PR [(#10387)](https://github.com/k3s-io/k3s/pull/10387) +* Bump k3s-root to v0.14.0 [(#10466)](https://github.com/k3s-io/k3s/pull/10466) + * The k3s bundled userspace has been bumped to a release based on buildroot 2024.02.3, addressing several CVEs in busybox and coreutils. 
+* Fix INSTALL_K3S_PR support [(#10472)](https://github.com/k3s-io/k3s/pull/10472) +* Add `data-dir` to uninstall and killall scripts [(#10473)](https://github.com/k3s-io/k3s/pull/10473) +* Bump github.com/hashicorp/go-retryablehttp from 0.7.4 to 0.7.7 [(#10400)](https://github.com/k3s-io/k3s/pull/10400) +* Bump golang:alpine image version [(#10359)](https://github.com/k3s-io/k3s/pull/10359) +* Bump Local Path Provisioner version [(#10394)](https://github.com/k3s-io/k3s/pull/10394) +* Ensure remotedialer kubelet connections use kubelet bind address [(#10480)](https://github.com/k3s-io/k3s/pull/10480) + * Fixed an issue where setting the `--bind-address` flag to a non-loopback or wildcard address would prevent `kubectl logs` from working properly. +* Bump Trivy version [(#10339)](https://github.com/k3s-io/k3s/pull/10339) +* Add etcd s3 config secret implementation [(#10340)](https://github.com/k3s-io/k3s/pull/10340) + * A proxy can now be configured for use when uploading etcd snapshots to a s3-compatible storage service. This overrides any proxy settings passed via environment variables. + * Credentials and endpoint configuration for storing etcd snapshots on a s3-compatible storage service can now be read from a Secret, instead of passing them via the CLI or config file. See https://github.com/k3s-io/k3s/blob/master/docs/adrs/etcd-s3-secret.md for more information. +* For E2E upgrade test, automatically determine the channel to use [(#10461)](https://github.com/k3s-io/k3s/pull/10461) +* Bump kine to v0.11.11 [(#10494)](https://github.com/k3s-io/k3s/pull/10494) +* Fix loadbalancer reentrant rlock [(#10511)](https://github.com/k3s-io/k3s/pull/10511) + * Fixed an issue that could cause the agent loadbalancer to deadlock when the currently in-use server goes down. +* Don't use server value from config file for etcd-snapshot commands [(#10514)](https://github.com/k3s-io/k3s/pull/10514) + * The `--server` and `--token` flags for the `k3s etcd-snapshot` command have been renamed to `--etcd-server` and `--etcd-token`, to avoid unintentionally running snapshot management commands against a remote node when the cluster join address or token are present in a config file. +* Use pagination when listing large numbers of resources [(#10527)](https://github.com/k3s-io/k3s/pull/10527) +* Fix multiple issues with servicelb [(#10552)](https://github.com/k3s-io/k3s/pull/10552) + * Fixed issue that caused ServiceLB to fail to create a daemonset for services with long names + * Fixed issue that caused ServiceLB pods to crashloop on nodes with ipv6 disabled at the kernel level +* Enhance E2E Hardened option [(#10558)](https://github.com/k3s-io/k3s/pull/10558) +* Allow Pprof and Superisor metrics in standalone mode [(#10576)](https://github.com/k3s-io/k3s/pull/10576) +* Use higher QPS for secrets reencryption [(#10571)](https://github.com/k3s-io/k3s/pull/10571) +* Fix issues loading data-dir value from env vars or dropin config files [(#10591)](https://github.com/k3s-io/k3s/pull/10591) +* Remove deprecated use of wait. 
functions [(#10546)](https://github.com/k3s-io/k3s/pull/10546) +* Wire lasso metrics up to metrics endpoint [(#10528)](https://github.com/k3s-io/k3s/pull/10528) +* Update stable channel to v1.30.3+k3s1 [(#10647)](https://github.com/k3s-io/k3s/pull/10647) +* Bump docker/docker to v25.0.6 [(#10642)](https://github.com/k3s-io/k3s/pull/10642) +* Add a change for killall to not unmount server and agent directory [(#10403)](https://github.com/k3s-io/k3s/pull/10403) +* Allow edge case OS rpm installs [(#10680)](https://github.com/k3s-io/k3s/pull/10680) +* Bump containerd to v1.7.20 [(#10659)](https://github.com/k3s-io/k3s/pull/10659) +* Update to newer OS images for install testing [(#10681)](https://github.com/k3s-io/k3s/pull/10681) +* Bump helm-controller to v0.16.3 to drop Helm v2 support [(#10628)](https://github.com/k3s-io/k3s/pull/10628) +* Add toleration support to ServiceLB DaemonSet [(#10687)](https://github.com/k3s-io/k3s/pull/10687) + * - **New Feature**: Users can now define Kubernetes tolerations for ServiceLB DaemonSet directly in the `svccontroller.k3s.cattle.io/tolerations` annotation on services. +* Fix: Add $SUDO prefix to transactional-update commands in install script [(#10531)](https://github.com/k3s-io/k3s/pull/10531) +* Update to v1.30.3-k3s1 and Go 1.22.5 [(#10707)](https://github.com/k3s-io/k3s/pull/10707) +* Fix caching name for e2e vagrant box [(#10695)](https://github.com/k3s-io/k3s/pull/10695) +* Fix k3s-killall.sh support for custom data dir [(#10709)](https://github.com/k3s-io/k3s/pull/10709) +* Adding MariaDB to README.md [(#10717)](https://github.com/k3s-io/k3s/pull/10717) +* Bump Trivy version [(#10670)](https://github.com/k3s-io/k3s/pull/10670) +* V1.31.0-k3s1 [(#10715)](https://github.com/k3s-io/k3s/pull/10715) +* Update kubernetes to v1.31.0-k3s3 [(#10780)](https://github.com/k3s-io/k3s/pull/10780) + +----- diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.32.X.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.32.X.md new file mode 100644 index 000000000..286868fb5 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/release-notes/v1.32.X.md @@ -0,0 +1,158 @@ +--- +hide_table_of_contents: true +sidebar_position: 1 +--- + +# v1.32.X + +:::warning Upgrade Notice +Before upgrading from earlier releases, be sure to read the Kubernetes [Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.32.md#urgent-upgrade-notes). 
+::: + +| Version | Release date | Kubernetes | Kine | SQLite | Etcd | Containerd | Runc | Flannel | Metrics-server | Traefik | CoreDNS | Helm-controller | Local-path-provisioner | +| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| [v1.32.0+k3s1](v1.32.X.md#release-v1320k3s1) | Jan 10 2025| [v1.32.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.32.md#v1320) | [v0.13.5](https://github.com/k3s-io/kine/releases/tag/v0.13.5) | [3.46.1](https://sqlite.org/releaselog/3_46_1.html) | [v3.5.16-k3s1](https://github.com/k3s-io/etcd/releases/tag/v3.5.16-k3s1) | [v1.7.23-k3s2](https://github.com/k3s-io/containerd/releases/tag/v1.7.23-k3s2) | [v1.2.1-k3s1](https://github.com/opencontainers/runc/releases/tag/v1.2.1-k3s1) | [v0.25.7](https://github.com/flannel-io/flannel/releases/tag/v0.25.7) | [v0.7.2](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.7.2) | [v2.11.10](https://github.com/traefik/traefik/releases/tag/v2.11.10) | [v1.12.0](https://github.com/coredns/coredns/releases/tag/v1.12.0) | [v0.16.5](https://github.com/k3s-io/helm-controller/releases/tag/v0.16.5) | [v0.0.30](https://github.com/rancher/local-path-provisioner/releases/tag/v0.0.30) | + +
+ +## Release [v1.32.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.32.0+k3s1) + + +This release is K3S's first in the v1.32 line. This release updates Kubernetes to v1.32.0. + +Kubernetes 1.32 moves the `AuthorizeNodeWithSelectors` feature gate to Beta and on by default. See [KEP-4601](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/4601-authorize-with-selectors/README.md) for more information. + +This feature-gate breaks some of the RBAC that previous releases of K3s relied upon. The January releases of K3s v1.29, v1.30, and v1.31 will contain backported fixes. Until then, you must set `--kube-apiserver-arg=feature-gates=AuthorizeNodeWithSelectors=false` on server nodes, if you want to mix K3s v1.32 nodes with nodes of other versions (within the limits of what is supported by the [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/)). + +For more details on what's new, see the [Kubernetes release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.32.md#changelog-since-v1310). + +### Changes since v1.31.4+k3s1: + +* Fix rotateca validation failures when not touching default self-signed CAs [(#10710)](https://github.com/k3s-io/k3s/pull/10710) +* Bump runc to v1.1.13 [(#10737)](https://github.com/k3s-io/k3s/pull/10737) +* Update stable channel to v1.30.4+k3s1 [(#10739)](https://github.com/k3s-io/k3s/pull/10739) +* Fix deploy latest commit on E2E tests [(#10725)](https://github.com/k3s-io/k3s/pull/10725) +* Remove secrets encryption controller [(#10612)](https://github.com/k3s-io/k3s/pull/10612) +* Update kubernetes to v1.31.0-k3s3 [(#10764)](https://github.com/k3s-io/k3s/pull/10764) +* Bump traefik to v2.11.8 [(#10779)](https://github.com/k3s-io/k3s/pull/10779) +* Update coredns to 1.11.3 and metrics-server to 0.7.2 [(#10760)](https://github.com/k3s-io/k3s/pull/10760) +* Add trivy scanning to PR reports [(#10758)](https://github.com/k3s-io/k3s/pull/10758) +* Cover edge case when on new minor release for E2E upgrade test [(#10781)](https://github.com/k3s-io/k3s/pull/10781) +* Bump aquasecurity/trivy-action from 0.20.0 to 0.24.0 [(#10795)](https://github.com/k3s-io/k3s/pull/10795) +* Update CNI plugins version [(#10798)](https://github.com/k3s-io/k3s/pull/10798) +* Bump Sonobuoy version [(#10792)](https://github.com/k3s-io/k3s/pull/10792) +* Fix /trivy action running against target branch instead of PR branch [(#10824)](https://github.com/k3s-io/k3s/pull/10824) +* Launch private registry with init [(#10822)](https://github.com/k3s-io/k3s/pull/10822) +* Add channel for v1.31 [(#10826)](https://github.com/k3s-io/k3s/pull/10826) +* Bump containerd to v1.7.21, runc to v1.1.14 [(#10805)](https://github.com/k3s-io/k3s/pull/10805) +* Bump helm-controller for skip-verify/plain-http and updated tolerations [(#10832)](https://github.com/k3s-io/k3s/pull/10832) +* Tag PR image build as latest before scanning [(#10825)](https://github.com/k3s-io/k3s/pull/10825) +* Only clean up containerd hosts dirs managed by k3s [(#10823)](https://github.com/k3s-io/k3s/pull/10823) +* Remove otelgrpc pinned dependency [(#10799)](https://github.com/k3s-io/k3s/pull/10799) +* Add node-internal-dns/node-external-dns address pass-through support [(#10852)](https://github.com/k3s-io/k3s/pull/10852) +* Give good report if no CVEs found in trivy [(#10853)](https://github.com/k3s-io/k3s/pull/10853) +* Fix hosts.toml header var [(#10870)](https://github.com/k3s-io/k3s/pull/10870) +* Bump Trivy version 
[(#10863)](https://github.com/k3s-io/k3s/pull/10863) +* Add int test for flannel-ipv6masq [(#10440)](https://github.com/k3s-io/k3s/pull/10440) +* Bump Trivy version [(#10899)](https://github.com/k3s-io/k3s/pull/10899) +* Update Kubernetes to v1.31.1-k3s3 [(#10911)](https://github.com/k3s-io/k3s/pull/10911) +* Add MariaDB to CI [(#10724)](https://github.com/k3s-io/k3s/pull/10724) +* Update stable channel tov1.30.5+k3s1 [(#10921)](https://github.com/k3s-io/k3s/pull/10921) +* Use static CNI bin dir [(#10868)](https://github.com/k3s-io/k3s/pull/10868) + * K3s now uses a stable directory for CNI binaries, which simplifies the installation of additional CNI plugins. +* Breakup trivy scan and check comment author [(#10935)](https://github.com/k3s-io/k3s/pull/10935) +* Fix getMembershipForUserInOrg call [(#10937)](https://github.com/k3s-io/k3s/pull/10937) +* Check k3s-io organization membership not team membership for trivy scans [(#10940)](https://github.com/k3s-io/k3s/pull/10940) +* Bump kine to v0.13.0 [(#10932)](https://github.com/k3s-io/k3s/pull/10932) + * Kine has been bumped to v0.13.0. This release includes changes that should enhance performance when using postgres as an external DB. The updated schema will be automatically used for new databases; to migrate to the new schema on existing databases, K3s can be started with the `KINE_SCHEMA_MIGRATION=2` environment variable set. +* Fix trivy report download [(#10943)](https://github.com/k3s-io/k3s/pull/10943) +* Trivy workflow: Specify GH_REPO env to use gh cli [(#10949)](https://github.com/k3s-io/k3s/pull/10949) +* Bump Trivy version [(#10924)](https://github.com/k3s-io/k3s/pull/10924) +* Bump traefik to chart 27.0.2 [(#10939)](https://github.com/k3s-io/k3s/pull/10939) +* Pass Rancher's VEX report to Trivy to remove known false-positives CVEs [(#10956)](https://github.com/k3s-io/k3s/pull/10956) +* Fix trivy vex line [(#10970)](https://github.com/k3s-io/k3s/pull/10970) +* Add user path to runtimes search [(#10953)](https://github.com/k3s-io/k3s/pull/10953) + * Runtimes detection will now use $PATH +* Bump to new wharfie version [(#10971)](https://github.com/k3s-io/k3s/pull/10971) +* Update README.md [(#10523)](https://github.com/k3s-io/k3s/pull/10523) +* Remove trailing whitespace [(#9362)](https://github.com/k3s-io/k3s/pull/9362) +* Bump kine to v0.13.2 [(#10978)](https://github.com/k3s-io/k3s/pull/10978) +* Allow configuration of Rootlesskit's CopyUpDirs through an environment variable [(#10386)](https://github.com/k3s-io/k3s/pull/10386) + * Add new environment variable "K3S_ROOTLESS_COPYUPDIRS" to add folders to the Rootlesskit configuration. 
+* Fix race condition when multiple nodes reconcile S3 snapshots [(#10979)](https://github.com/k3s-io/k3s/pull/10979) +* Bump Trivy version [(#10996)](https://github.com/k3s-io/k3s/pull/10996) +* Add ca-cert rotation integration test, and fix ca-cert rotation [(#11013)](https://github.com/k3s-io/k3s/pull/11013) +* Add e2e test which verifies traffic policies and firewall in services [(#10972)](https://github.com/k3s-io/k3s/pull/10972) +* Update tcpproxy for import path change [(#11029)](https://github.com/k3s-io/k3s/pull/11029) +* Bump Local Path Provisioner version [(#10862)](https://github.com/k3s-io/k3s/pull/10862) +* Bump local-path-provisioner to v0.0.30 [(#11049)](https://github.com/k3s-io/k3s/pull/11049) +* Bump helm-controller and klipper-helm [(#11060)](https://github.com/k3s-io/k3s/pull/11060) +* Bump containerd to v1.7.22 [(#11067)](https://github.com/k3s-io/k3s/pull/11067) +* Simplify svclb daemonset [(#10954)](https://github.com/k3s-io/k3s/pull/10954) + * Stop using klipper-lb as the image for svclb. Replace it with a simple busybox which just sleeps +* Add the nvidia runtime cdi [(#11065)](https://github.com/k3s-io/k3s/pull/11065) + * Add nvidia cdi runtime to the list of supported and discoverable runtimes +* Bump Trivy version [(#11103)](https://github.com/k3s-io/k3s/pull/11103) +* Rollback GHA to Ubuntu 22.04 [(#11111)](https://github.com/k3s-io/k3s/pull/11111) +* Revert "Make svclb as simple as possible" [(#11109)](https://github.com/k3s-io/k3s/pull/11109) +* Fix Github Actions for Ubuntu-24.04 [(#11112)](https://github.com/k3s-io/k3s/pull/11112) +* Bump aquasecurity/trivy-action from 0.24.0 to 0.27.0 [(#11105)](https://github.com/k3s-io/k3s/pull/11105) +* Check the last 10 commits for upgrade E2E test [(#11086)](https://github.com/k3s-io/k3s/pull/11086) +* Bump aquasecurity/trivy-action from 0.27.0 to 0.28.0 [(#11138)](https://github.com/k3s-io/k3s/pull/11138) +* Fixes "file exists" error from CNI bins when upgrading k3s [(#11123)](https://github.com/k3s-io/k3s/pull/11123) +* Reduce the number of GH api request for E2E nightly [(#11148)](https://github.com/k3s-io/k3s/pull/11148) +* Update Kubernetes to v1.31.2-k3s1 and Go 1.22.8 [(#11163)](https://github.com/k3s-io/k3s/pull/11163) +* Update stable channel to v1.30.6+k3s1 [(#11186)](https://github.com/k3s-io/k3s/pull/11186) +* Fix timeout when defragmenting etcd on startup [(#11164)](https://github.com/k3s-io/k3s/pull/11164) +* Capture all fedora atomic variants in install script [(#11170)](https://github.com/k3s-io/k3s/pull/11170) + * Allow easier installation of k3s on all variants of fedora atomic that use rpm-ostree +* Typo fixes in contributing.md [(#11201)](https://github.com/k3s-io/k3s/pull/11201) +* Bump Trivy version [(#11206)](https://github.com/k3s-io/k3s/pull/11206) +* Pin vagrant to older version to avoid known issue 13527 [(#11226)](https://github.com/k3s-io/k3s/pull/11226) +* Set kine EmulatedETCDVersion from embedded etcd version [(#11221)](https://github.com/k3s-io/k3s/pull/11221) +* Add nonroot-devices flag to agent CLI [(#11200)](https://github.com/k3s-io/k3s/pull/11200) + * `Device_ownership_from_security_context` can now be enabled in the containerd CRI config by setting the `--nonroot-devices` flag or config key. 
+* Bump runc to v1.2 [(#10896)](https://github.com/k3s-io/k3s/pull/10896) +* Update flannel and base cni plugins version [(#11188)](https://github.com/k3s-io/k3s/pull/11188) +* Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 [(#11236)](https://github.com/k3s-io/k3s/pull/11236) +* Fix MustFindString returning override flags on external CLI commands [(#11237)](https://github.com/k3s-io/k3s/pull/11237) +* Bump containerd to v1.7.23-k3s1 to fix registry rewrite token scopes [(#11238)](https://github.com/k3s-io/k3s/pull/11238) +* Fix the "Standalone"-mode of oidc-login in the wrapped kubectl library [(#11266)](https://github.com/k3s-io/k3s/pull/11266) + * Fixes 'no Auth Provider found for name "oidc"' when using oidc-login in standalone mode. +* Bump K3s-root version to v0.14.1 [(#11282)](https://github.com/k3s-io/k3s/pull/11282) +* Bump kine [(#11277)](https://github.com/k3s-io/k3s/pull/11277) +* Bump kine for mysql connection close fix [(#11305)](https://github.com/k3s-io/k3s/pull/11305) +* Fix handling of wrapped subcommands when run with a path [(#11306)](https://github.com/k3s-io/k3s/pull/11306) +* Fix updatecli config for klipper and helm-controller [(#11290)](https://github.com/k3s-io/k3s/pull/11290) +* Fix issue with loadbalancer failover to default server [(#11319)](https://github.com/k3s-io/k3s/pull/11319) +* Update `localstorage_int_test.go` reference [(#11339)](https://github.com/k3s-io/k3s/pull/11339) + * Update `localstorage_int_test.go` reference in `tests/integration/README.md` +* Add to the output command to be consistent with the product command [(#11345)](https://github.com/k3s-io/k3s/pull/11345) +* Allow install script to print error on failed binary download [(#11335)](https://github.com/k3s-io/k3s/pull/11335) +* Remove the go toolchain line [(#11358)](https://github.com/k3s-io/k3s/pull/11358) +* Add ubuntu 24.04 apt command for e2e test [(#11361)](https://github.com/k3s-io/k3s/pull/11361) +* Bump Trivy version [(#11360)](https://github.com/k3s-io/k3s/pull/11360) +* Bump aquasecurity/trivy-action from 0.28.0 to 0.29.0 [(#11364)](https://github.com/k3s-io/k3s/pull/11364) +* Convert legacy docker tests from bash to golang [(#11357)](https://github.com/k3s-io/k3s/pull/11357) +* Update Kubernetes to v1.31.3-k3s1 [(#11373)](https://github.com/k3s-io/k3s/pull/11373) +* Fix Branch Name logic for Dependabot and UpdateCLI pushes to k3s-io [(#11376)](https://github.com/k3s-io/k3s/pull/11376) +* Fix INSTALL_K3S_PR support [(#11383)](https://github.com/k3s-io/k3s/pull/11383) +* Fix etcd backup/restore test and add guardrail for etcd-snapshot [(#11314)](https://github.com/k3s-io/k3s/pull/11314) +* Bump containerd to -k3s2 to fix rewrites [(#11401)](https://github.com/k3s-io/k3s/pull/11401) +* Fix opensuse-leap install test [(#11379)](https://github.com/k3s-io/k3s/pull/11379) +* Fix secrets-encrypt reencrypt timeout error [(#11385)](https://github.com/k3s-io/k3s/pull/11385) +* Rework loadbalancer server selection logic [(#11329)](https://github.com/k3s-io/k3s/pull/11329) +* Remove experimental from embedded-registry flag [(#11443)](https://github.com/k3s-io/k3s/pull/11443) +* Update stable channel to v1.31.3+k3s1 [(#11436)](https://github.com/k3s-io/k3s/pull/11436) +* Fix agent tunnel address with dedicated supervisor port [(#11427)](https://github.com/k3s-io/k3s/pull/11427) +* Update coredns to 1.12.0 [(#11387)](https://github.com/k3s-io/k3s/pull/11387) +* Bump Trivy version [(#11430)](https://github.com/k3s-io/k3s/pull/11430) +* Update to v1.31.4-k3s1 and Go 1.22.9 
[(#11463)](https://github.com/k3s-io/k3s/pull/11463)
+* Bump alpine from 3.20 to 3.21 in /conformance [(#11433)](https://github.com/k3s-io/k3s/pull/11433)
+* Fix docker check warnings [(#11474)](https://github.com/k3s-io/k3s/pull/11474)
+* Update stable channel to v1.31.4+k3s1 [(#11483)](https://github.com/k3s-io/k3s/pull/11483)
+* V1.32.0+k3s1 [(#11478)](https://github.com/k3s-io/k3s/pull/11478)
+* Switch to using kubelet config file for all supported flags [(#10433)](https://github.com/k3s-io/k3s/pull/10433)
+* Load kernel modules for nft in agent setup [(#11527)](https://github.com/k3s-io/k3s/pull/11527)
+
+-----
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/hardening-guide.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/hardening-guide.md
new file mode 100644
index 000000000..07294f35b
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/hardening-guide.md
@@ -0,0 +1,701 @@
+---
+title: "Guia de Segurança CIS"
+---
+
+Este documento fornece orientação prescritiva para fortalecer uma instalação de produção do K3s. Ele descreve as configurações e os controles necessários para atender aos controles do benchmark de Kubernetes do Center for Internet Security (CIS).
+
+O K3s aplica e ativa por padrão uma série de mitigações de segurança e, sem modificação, passará em vários controles do CIS para Kubernetes. Há algumas exceções notáveis a isso que exigem intervenção manual para cumprir totalmente o CIS Benchmark:
+
+1. O K3s não modificará o sistema operacional do host. Quaisquer modificações no nível do host precisarão ser feitas manualmente.
+2. Certos controles de política do CIS para `NetworkPolicies` e `PodSecurityStandards` (`PodSecurityPolicies` na v1.24 e anteriores) restringirão a funcionalidade do cluster. Você deve optar por fazer com que o K3s os configure, adicionando as opções apropriadas (habilitando plugins de admissão) aos seus sinalizadores de linha de comando ou arquivo de configuração, bem como aplicando manualmente as políticas apropriadas. Mais detalhes são apresentados nas seções abaixo.
+
+A primeira seção (1.1) do CIS Benchmark trata principalmente das permissões e da propriedade dos manifestos de pod. O K3s não os utiliza para os componentes principais, pois tudo é empacotado em um único binário.
+
+## Requisitos de Nível de Host
+
+Há duas áreas de requisitos de nível de host: parâmetros do kernel e configuração do processo/diretório do etcd. Elas são descritas nesta seção.
+
+### Certifique-se de que `protect-kernel-defaults` esteja definido
+
+Este é um sinalizador do kubelet que fará com que o kubelet saia se os parâmetros do kernel necessários não forem definidos ou forem definidos com valores diferentes dos padrões do kubelet.
+
+> **Observação:** `protect-kernel-defaults` é exposto como um sinalizador de nível superior do K3s.
+
+#### Definir Parâmetros do Kernel
+
+Crie um arquivo chamado `/etc/sysctl.d/90-kubelet.conf` e adicione o snippet abaixo. Em seguida, execute `sysctl -p /etc/sysctl.d/90-kubelet.conf`.
+
+```bash
+vm.panic_on_oom=0
+vm.overcommit_memory=1
+kernel.panic=10
+kernel.panic_on_oops=1
+```
+
+## Requisitos de Tempo de Execução do Kubernetes
+
+Os requisitos de tempo de execução para cumprir o CIS Benchmark concentram-se na segurança de pod (via PSP ou PSA), nas políticas de rede e nos logs de auditoria do API Server. Eles são descritos nesta seção.
+
+Por padrão, o K3s não inclui nenhuma segurança de pod ou políticas de rede.
No entanto, o K3s é fornecido com um controlador que aplicará políticas de rede, se alguma for criada. O K3s não habilita a auditoria por padrão, portanto, a configuração do log de auditoria e a política de auditoria devem ser criadas manualmente. Por padrão, o K3s é executado com os controladores de admissão `PodSecurity` e `NodeRestriction` habilitados, entre outros. + +### Segurança do Pod + + + + +O K3s v1.25 e versões mais recentes oferecem suporte a [Pod Security Admissions (PSAs)](https://kubernetes.io/docs/concepts/security/pod-security-admission/) para controlar a segurança do pod. Os PSAs são habilitados passando o seguinte sinalizador para o servidor K3s: +``` +--kube-apiserver-arg="admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml" +``` + +A política deve ser escrita em um arquivo chamado `psa.yaml` no diretório `/var/lib/rancher/k3s/server`. + +Aqui está um exemplo de um PSA compatível: +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1beta1 + kind: PodSecurityConfiguration + defaults: + enforce: "restricted" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + namespaces: [kube-system, cis-operator-system] +``` + + + +O K3s v1.24 e versões mais antigas oferecem suporte a [Pod Security Policies (PSPs)](https://kubernetes.io/docs/concepts/security/pod-security-policy/) para controlar a segurança do pod. Os PSPs são habilitados passando o seguinte sinalizador para o servidor K3s: + +``` +--kube-apiserver-arg="enable-admission-plugins=NodeRestriction,PodSecurityPolicy" +``` +Isso terá o efeito de manter o plugin `NodeRestriction`, bem como habilitar o `PodSecurityPolicy`. + +Quando os PSPs são habilitados, uma política pode ser aplicada para satisfazer os controles necessários descritos na seção 5.2 do CIS Benchmark. + +Aqui está um exemplo de um PSP compatível: + +```yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: restricted-psp +spec: + privileged: false # CIS - 5.2.1 + allowPrivilegeEscalation: false # CIS - 5.2.5 + requiredDropCapabilities: # CIS - 5.2.7/8/9 + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'csi' + - 'persistentVolumeClaim' + - 'ephemeral' + hostNetwork: false # CIS - 5.2.4 + hostIPC: false # CIS - 5.2.3 + hostPID: false # CIS - 5.2.2 + runAsUser: + rule: 'MustRunAsNonRoot' # CIS - 5.2.6 + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +``` + +Para que o PSP acima seja efetivo, precisamos criar um ClusterRole e um ClusterRoleBinding. Também precisamos incluir uma "política irrestrita do sistema", que é necessária para pods de nível de sistema que exigem privilégios adicionais, e uma política adicional que permite que sysctls necessários para o servicelb funcionem corretamente. + +Combinando a configuração acima com a [Política de Rede](#networkpolicies) descrita na próxima seção, um único arquivo pode ser colocado no diretório `/var/lib/rancher/k3s/server/manifests`. 
Aqui está um exemplo de um arquivo `policy.yaml`: + +```yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: restricted-psp +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'csi' + - 'persistentVolumeClaim' + - 'ephemeral' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: system-unrestricted-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + allowPrivilegeEscalation: true + allowedCapabilities: + - '*' + fsGroup: + rule: RunAsAny + hostIPC: true + hostNetwork: true + hostPID: true + hostPorts: + - max: 65535 + min: 0 + privileged: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: svclb-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_ADMIN + allowedUnsafeSysctls: + - net.ipv4.ip_forward + - net.ipv6.conf.all.forwarding + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:restricted-psp +rules: +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - restricted-psp +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:system-unrestricted-psp +rules: +- apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - system-unrestricted-psp + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psp:svclb-psp +rules: +- apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - svclb-psp + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: default:restricted-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted-psp +subjects: +- kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system-unrestricted-node-psp-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:system-unrestricted-psp +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: system-unrestricted-svc-acct-psp-rolebinding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:system-unrestricted-psp +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: svclb-psp-rolebinding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
psp:svclb-psp
+subjects:
+- kind: ServiceAccount
+  name: svclb
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: intra-namespace
+  namespace: kube-system
+spec:
+  podSelector: {}
+  ingress:
+    - from:
+      - namespaceSelector:
+          matchLabels:
+            name: kube-system
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: intra-namespace
+  namespace: default
+spec:
+  podSelector: {}
+  ingress:
+    - from:
+      - namespaceSelector:
+          matchLabels:
+            name: default
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: intra-namespace
+  namespace: kube-public
+spec:
+  podSelector: {}
+  ingress:
+    - from:
+      - namespaceSelector:
+          matchLabels:
+            name: kube-public
+```
+
+
+> **Observação:** As adições críticas do Kubernetes, como CNI, DNS e Ingress, são executadas como pods no namespace `kube-system`. Portanto, esse namespace terá uma política menos restritiva para que esses componentes possam ser executados corretamente.
+
+
+### NetworkPolicies
+
+O CIS exige que todos os namespaces tenham aplicada uma política de rede que limite razoavelmente o tráfego entre namespaces e pods.
+
+As políticas de rede devem ser colocadas no diretório `/var/lib/rancher/k3s/server/manifests`, onde serão automaticamente implantadas na inicialização.
+
+Aqui está um exemplo de uma política de rede compatível.
+
+```yaml
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: intra-namespace
+  namespace: kube-system
+spec:
+  podSelector: {}
+  ingress:
+    - from:
+      - namespaceSelector:
+          matchLabels:
+            kubernetes.io/metadata.name: kube-system
+```
+
+Com as restrições aplicadas, o DNS será bloqueado, a menos que seja permitido propositalmente. Abaixo está uma política de rede que permite o tráfego de DNS (substitua `<NAMESPACE>` pelo namespace desejado).
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-network-dns-policy
+  namespace: <NAMESPACE>
+spec:
+  ingress:
+  - ports:
+    - port: 53
+      protocol: TCP
+    - port: 53
+      protocol: UDP
+  podSelector:
+    matchLabels:
+      k8s-app: kube-dns
+  policyTypes:
+  - Ingress
+```
+
+O metrics-server e o controlador de entrada do Traefik serão bloqueados por padrão se políticas de rede que permitam o acesso não forem criadas. O Traefik v1, conforme empacotado no K3s versão 1.20 e anteriores, usa rótulos diferentes do Traefik v2. Certifique-se de usar apenas o yaml de amostra abaixo que está associado à versão do Traefik presente no seu cluster.
+ + + + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-metrics-server + namespace: kube-system +spec: + podSelector: + matchLabels: + k8s-app: metrics-server + ingress: + - {} + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-svclbtraefik-ingress + namespace: kube-system +spec: + podSelector: + matchLabels: + svccontroller.k3s.cattle.io/svcname: traefik + ingress: + - {} + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-traefik-v121-ingress + namespace: kube-system +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: traefik + ingress: + - {} + policyTypes: + - Ingress +--- + +``` + + + + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-metrics-server + namespace: kube-system +spec: + podSelector: + matchLabels: + k8s-app: metrics-server + ingress: + - {} + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-svclbtraefik-ingress + namespace: kube-system +spec: + podSelector: + matchLabels: + svccontroller.k3s.cattle.io/svcname: traefik + ingress: + - {} + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-traefik-v120-ingress + namespace: kube-system +spec: + podSelector: + matchLabels: + app: traefik + ingress: + - {} + policyTypes: + - Ingress +--- + +``` + + + +:::info +Os operadores devem gerenciar políticas de rede normalmente para namespaces adicionais que são criados. +::: + + +### Configuração de auditoria do servidor API + +Os requisitos CIS 1.2.22 a 1.2.25 estão relacionados à configuração de logs de auditoria para o API Server. O K3s não cria por padrão o diretório de log e a política de auditoria, pois os requisitos de auditoria são específicos para as políticas e o ambiente de cada usuário. + +O diretório de log, idealmente, deve ser criado antes de iniciar o K3s. Uma permissão de acesso restritiva é recomendada para evitar vazamento de informações potencialmente sensíveis. + +```bash +sudo mkdir -p -m 700 /var/lib/rancher/k3s/server/logs +``` + +Uma política de auditoria inicial para registrar metadados de solicitação é fornecida abaixo. A política deve ser gravada em um arquivo chamado `audit.yaml` no diretório `/var/lib/rancher/k3s/server`. Informações detalhadas sobre a configuração da política para o servidor de API podem ser encontradas na [documentação](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/) do Kubernetes. + +```yaml +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: Metadata +``` + +Ambas as configurações devem ser passadas como argumentos para o Servidor de API como: + + + + +```yaml +kube-apiserver-arg: + - 'admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml' + - 'audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' + - 'audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' + - 'audit-log-maxage=30' + - 'audit-log-maxbackup=10' + - 'audit-log-maxsize=100' +``` + + + +```bash +--kube-apiserver-arg='audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' +--kube-apiserver-arg='audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' +``` + + + + +O K3s deve ser reiniciado para carregar a nova configuração. 
+ +```bash +sudo systemctl daemon-reload +sudo systemctl restart k3s.service +``` + +## Configuração para Componentes do Kubernetes + + +A configuração abaixo deve ser colocada no [arquivo de configuração](../installation/configuration.md#configuration-file) e contém todas as correções necessárias para proteger os componentes do Kubernetes. + + + + + +```yaml +protect-kernel-defaults: true +secrets-encryption: true +kube-apiserver-arg: + - "enable-admission-plugins=NodeRestriction,EventRateLimit" + - 'admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml' + - 'audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' + - 'audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' + - 'audit-log-maxage=30' + - 'audit-log-maxbackup=10' + - 'audit-log-maxsize=100' +kube-controller-manager-arg: + - 'terminated-pod-gc-threshold=10' +kubelet-arg: + - 'streaming-connection-idle-timeout=5m' + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" +``` + + + + + +```yaml +protect-kernel-defaults: true +secrets-encryption: true +kube-apiserver-arg: + - 'enable-admission-plugins=NodeRestriction,PodSecurityPolicy,NamespaceLifecycle,ServiceAccount' + - 'audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' + - 'audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' + - 'audit-log-maxage=30' + - 'audit-log-maxbackup=10' + - 'audit-log-maxsize=100' +kube-controller-manager-arg: + - 'terminated-pod-gc-threshold=10' +kubelet-arg: + - 'streaming-connection-idle-timeout=5m' + - 'make-iptables-util-chains=true' + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" +``` + + + + +## Operações Manuais +Os seguintes são controles que o K3s atualmente não passa com a configuração acima aplicada. Esses controles exigem intervenção manual para cumprir totalmente com o CIS Benchmark. + +### Control 1.1.20 +Certifique-se de que as permissões do arquivo de certificado PKI do Kubernetes estejam definidas como 600 ou mais restritivas (Manual) + +
+Remediação +Os arquivos de certificado PKI do K3s são armazenados em `/var/lib/rancher/k3s/server/tls/` com permissão 644. +Para remediar, execute o seguinte comando: +```bash +chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt +``` +
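+
+Para conferir o resultado, pode-se usar o mesmo comando de auditoria empregado pelos guias de autoavaliação deste documento; após a remediação, cada certificado deve aparecer com permissão 600:
+```bash
+stat -c %n %a /var/lib/rancher/k3s/server/tls/*.crt
+```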
+ +### Control 1.2.9 +Certifique-se de que o plugin de controle de admissão EventRateLimit esteja definido + +
+Remediação
+Siga a [documentação do Kubernetes](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit) e defina os limites desejados em um arquivo de configuração.
+Para esta e outras configurações de PSA, esta documentação usa /var/lib/rancher/k3s/server/psa.yaml.
+Em seguida, edite o arquivo de configuração do K3s /etc/rancher/k3s/config.yaml e defina os parâmetros abaixo.
+```yaml
+kube-apiserver-arg:
+  - "enable-admission-plugins=NodeRestriction,EventRateLimit"
+  - "admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml"
+```
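+
+Como referência, segue um esboço de como o plugin EventRateLimit pode ser declarado no mesmo `psa.yaml`, ao lado do plugin PodSecurity mostrado anteriormente neste guia, usando o formato de configuração descrito na documentação do Kubernetes. Os valores de `qps` e `burst` abaixo são apenas exemplos e devem ser ajustados ao seu ambiente:
+```yaml
+apiVersion: apiserver.config.k8s.io/v1
+kind: AdmissionConfiguration
+plugins:
+- name: EventRateLimit
+  configuration:
+    apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+    kind: Configuration
+    limits:
+    # Limite global de eventos recebidos pelo API Server (valores de exemplo)
+    - type: Server
+      qps: 5000
+      burst: 20000
+```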
+ +### Control 1.2.11 +Certifique-se de que o plugin de controle de admissão AlwaysPullImages esteja definido + +
+Remediação
+Permissivo, conforme as diretrizes do CIS:
+"Esta configuração pode impactar clusters offline ou isolados, que têm imagens pré-carregadas e não têm acesso a um registro para baixar as imagens em uso. Esta configuração não é apropriada para esses clusters."
+Edite o arquivo de configuração do K3s /etc/rancher/k3s/config.yaml e defina o parâmetro abaixo.
+```yaml
+kube-apiserver-arg:
+  - "enable-admission-plugins=...,AlwaysPullImages,..."
+```
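+
+Por exemplo, combinando com os plugins de admissão já recomendados neste guia (lista meramente ilustrativa; a lista completa depende da sua configuração):
+```yaml
+kube-apiserver-arg:
+  - "enable-admission-plugins=NodeRestriction,EventRateLimit,AlwaysPullImages"
+```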
+ +### Control 1.2.21 +Certifique-se de que o argumento --request-timeout esteja definido como apropriado + +
+Remediação +Permissivo, conforme as diretrizes do CIS, +"é recomendado definir esse limite conforme apropriado e alterar o limite padrão de 60 segundos somente se necessário". +Edite o arquivo de configuração do K3s /etc/rancher/k3s/config.yaml +e defina o parâmetro abaixo se necessário. Por exemplo, +```yaml +kube-apiserver-arg: + - "request-timeout=300s" +``` +
+ +### Control 4.2.13 +Garanta que um limite seja definido nos PIDs do pod + +
+Remediação
+Decida um nível apropriado para este parâmetro e defina-o.
+Se estiver usando um arquivo de configuração do K3s (/etc/rancher/k3s/config.yaml), edite o arquivo para definir o limite de PIDs por pod (`pod-max-pids`):
+```yaml
+kubelet-arg:
+  - "pod-max-pids=<valor>"
+```
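+
+Por exemplo, assumindo um limite hipotético de 4096 PIDs por pod:
+```yaml
+kubelet-arg:
+  - "pod-max-pids=4096"
+```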
+ +### Control 5.X + +Todos os controles 5.X estão relacionados à configuração de política do Kubernetes. Esses controles não são impostos pelo K3s por padrão. + +Consulte [CIS 1.8 Seção 5](self-assessment-1.8.md#51-rbac-and-service-accounts) para obter mais informações sobre como criar e aplicar essas políticas. + +## Conclusão + +Se você seguiu este guia, seu cluster K3s será configurado para estar em conformidade com o CIS Kubernetes Benchmark. Você pode revisar o [CIS 1.8 Self-Assessment Guide](self-assessment-1.8.md) para entender as expectativas de cada uma das verificações do benchmark e como você pode fazer o mesmo em seu cluster. diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/secrets-encryption.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/secrets-encryption.md new file mode 100644 index 000000000..1fdc87a01 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/secrets-encryption.md @@ -0,0 +1,57 @@ +--- +title: Criptografia de Secrets +--- + +# Configurando Criptografia de Secrets + +O K3s suporta habilitar a criptografia de segredos em repouso. Ao iniciar o servidor pela primeira vez, passar o sinalizador `--secrets-encryption` fará o seguinte automaticamente: + +- Gerar uma chave AES-CBC +- Gerar um arquivo de configuração de criptografia com a chave gerada +- Passar a configuração para o KubeAPI como encryption-provider-config + +:::tip +O Secrets-encryption não pode ser habilitado em um servidor existente sem reiniciá-lo. +Use `curl -sfL https://get.k3s.io | sh -s - server --secrets-encryption` se estiver instalando a partir de script ou outros métodos descritos em [Opções de configuração](../installation/configuration.md#configuration-with-install-script). +::: + +Exemplo do arquivo de configuração de criptografia: +```json +{ + "kind": "EncryptionConfiguration", + "apiVersion": "apiserver.config.k8s.io/v1", + "resources": [ + { + "resources": [ + "secrets" + ], + "providers": [ + { + "aescbc": { + "keys": [ + { + "name": "aescbckey", + "secret": "xxxxxxxxxxxxxxxxxxx" + } + ] + } + }, + { + "identity": {} + } + ] + } + ] +} +``` + +## Ferramenta de Criptografia Secrets + +O K3s contém uma ferramenta utilitária `secrets-encrypt`, que permite o controle automático sobre o seguinte: + +- Desabilitando/Habilitando a criptografia de segredos +- Adicionando novas chaves de criptografia +- Girando e excluindo chaves de criptografia +- Recriptografando segredos + +Para obter mais informações, consulte a [documentação do comando `k3s secrets-encrypt`](../cli/secrets-encrypt.md). diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/security.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/security.md new file mode 100644 index 000000000..e61d7b37b --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/security.md @@ -0,0 +1,19 @@ +--- +title: "Segurança" +--- + +Esta seção descreve a metodologia e os meios de proteger um cluster K3s. Ela é dividida em 2 seções. Esses guias assumem que o k3s está sendo executado com etcd incorporado. + +Primeiro, o guia de reforço fornece uma lista de práticas recomendadas de segurança para proteger um cluster K3s. + +* [Hardening Guide](hardening-guide.md) + +Segundo, é a autoavaliação para validar um cluster endurecido. 
Atualmente, temos três avaliações diferentes disponíveis:
+
+* [CIS 1.24 Guia de Autoavaliação do Benchmark](self-assessment-1.24.md), para K3s versão v1.24
+
+* [CIS 1.7 Guia de Autoavaliação do Benchmark](self-assessment-1.7.md), para K3s versão v1.25
+
+* [CIS 1.8 Guia de Autoavaliação do Benchmark](self-assessment-1.8.md), para K3s versão v1.26-v1.29
+
+
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.23.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.23.md
new file mode 100644
index 000000000..f6b4b9437
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.23.md
@@ -0,0 +1,2953 @@
+---
+title: CIS 1.23 Self Assessment Guide
+---
+
+## Overview
+
+This document is a companion to the [K3s security hardening guide](hardening-guide.md). The hardening guide provides prescriptive guidance for hardening a production installation of K3s, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the CIS Kubernetes Benchmark. It is to be used by K3s operators, security teams, auditors, and decision-makers.
+
+This guide is specific to the **v1.22-v1.23** release lines of K3s and the **v1.23** release of the CIS Kubernetes Benchmark.
+
+For more information about each control, including detailed descriptions and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.23. You can download the benchmark, after creating a free account, from the [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes).
+
+### Testing controls methodology
+
+Each control in the CIS Kubernetes Benchmark was evaluated against a K3s cluster that was configured according to the accompanying hardening guide.
+
+Where control audits differ from the original CIS benchmark, the audit commands specific to K3s are provided for testing.
+
+These are the possible results for each control:
+
+- **Pass** - The K3s cluster under test passed the audit outlined in the benchmark.
+- **Not Applicable** - The control is not applicable to K3s because of how it is designed to operate. The remediation section will explain why this is so.
+- **Warn** - The control is manual in the CIS benchmark and it depends on the cluster's use case or some other factor that must be determined by the cluster operator. These controls have been evaluated to ensure K3s does not prevent their implementation, but no further configuration or auditing of the cluster under test has been performed.
+
+This guide assumes that K3s is running as a systemd unit. Your installation may vary and will require you to adjust the "audit" commands to fit your scenario.
+
+> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide.
+
+
+## 1.1 Control Plane Node Configuration Files
+### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Run the below command (based on the file location on your system) on the control plane node.
+For example, `chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml`
+
+### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Run the below command (based on the file location on your system) on the control plane node.
+For example, `chown root:root /etc/kubernetes/manifests/kube-apiserver.yaml` + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chmod 644 /etc/kubernetes/manifests/kube-controller-manager.yaml` + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml` + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chmod 644 /etc/kubernetes/manifests/kube-scheduler.yaml` + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chown root:root /etc/kubernetes/manifests/kube-scheduler.yaml` + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chmod 644 /etc/kubernetes/manifests/etcd.yaml` + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chown root:root /etc/kubernetes/manifests/etcd.yaml` + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chmod 644 ` + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chown root:root ` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). 
For example, +chmod 700 /var/lib/etcd + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 1.1.11 +``` + +**Expected Result**: + +```console +'700' is equal to '700' +``` + +**Returned Value**: + +```console +700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** Not Applicable + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +### 1.1.13 Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, chown root:root /etc/kubernetes/admin.conf + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod 644 scheduler + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, `chown root:root scheduler` + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod 644 controllermanager + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root controllermanager + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/k3s/server/tls +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, +chown -R root:root /etc/kubernetes/pki/ + +**Audit:** + +```bash +find /var/lib/rancher/k3s/server/tls | xargs stat -c %U:%G +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod -R 644 /etc/kubernetes/pki/*.crt + +**Audit:** + +```bash +stat -c %n %a /var/lib/rancher/k3s/server/tls/*.crt +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod -R 600 /etc/kubernetes/pki/*.key + +**Audit:** + +```bash +stat -c %n %a /var/lib/rancher/k3s/server/tls/*.key +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--anonymous-auth=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth' +``` + +### 1.2.2 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the `--token-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep containerd | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 1616 1600 6 13:26 ? 00:01:28 containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd root 2318 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id b41ec3297be4625c2406ad8b7b4f8b91cddd60850c420050c4c3273f809b3e7e -address /run/k3s/containerd/containerd.sock root 2341 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id e7999a65ae0a4e9969f32317ec48ae4f7071b62f92e5236696737973be77c2e1 -address /run/k3s/containerd/containerd.sock root 3199 1 0 13:27 ? 
00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 90c4e63d6ee29d40a48c2fdaf2738c2472cba1139dde8a550466c452184f8528 -address /run/k3s/containerd/containerd.sock root 3923 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id be5f4b9bd1ed9239362b7000b47f353acb8bc8ca52a9c9145cba0e902ec1c4b9 -address /run/k3s/containerd/containerd.sock root 4559 1 0 13:28 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 04cd40ea6b6078797f177c902c89412c70e523ad2a687a62829bf1d16ff0e19c -address /run/k3s/containerd/containerd.sock root 4647 1 0 13:28 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 48f37a480315b6adce2d2a5c5d67a85412dd0ba7a2e82816434e0deb9fa75de9 -address /run/k3s/containerd/containerd.sock root 6610 1 0 13:47 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 1cf71c22f568468055e517ab363437c0e54e45274c64024d337cc5bcce66341d -address /run/k3s/containerd/containerd.sock +``` + +### 1.2.3 Ensure that the --DenyServiceExternalIPs is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the `DenyServiceExternalIPs` +from enabled admission plugins. + +**Audit:** + +```bash +/bin/ps -ef | grep containerd | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' is present OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 1616 1600 6 13:26 ? 00:01:28 containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd root 2318 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id b41ec3297be4625c2406ad8b7b4f8b91cddd60850c420050c4c3273f809b3e7e -address /run/k3s/containerd/containerd.sock root 2341 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id e7999a65ae0a4e9969f32317ec48ae4f7071b62f92e5236696737973be77c2e1 -address /run/k3s/containerd/containerd.sock root 3199 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 90c4e63d6ee29d40a48c2fdaf2738c2472cba1139dde8a550466c452184f8528 -address /run/k3s/containerd/containerd.sock root 3923 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id be5f4b9bd1ed9239362b7000b47f353acb8bc8ca52a9c9145cba0e902ec1c4b9 -address /run/k3s/containerd/containerd.sock root 4559 1 0 13:28 ? 
00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 04cd40ea6b6078797f177c902c89412c70e523ad2a687a62829bf1d16ff0e19c -address /run/k3s/containerd/containerd.sock root 4647 1 0 13:28 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 48f37a480315b6adce2d2a5c5d67a85412dd0ba7a2e82816434e0deb9fa75de9 -address /run/k3s/containerd/containerd.sock root 6610 1 0 13:47 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 1cf71c22f568468055e517ab363437c0e54e45274c64024d337cc5bcce66341d -address /run/k3s/containerd/containerd.sock +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the --kubelet-https parameter. + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the control plane node and set the +kubelet client certificate and key parameters as below. +``` +--kubelet-client-certificate= +--kubelet-client-key= +``` + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- 
--requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Follow the Kubernetes documentation and set up the TLS connection between
+the apiserver and kubelets. Then, edit the API server pod specification file
+/etc/kubernetes/manifests/kube-apiserver.yaml on the control plane node and set the
+--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority
+`--kubelet-certificate-authority=`.
+
+**Audit:**
+
+```bash
+journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'
+```
+
+**Expected Result**:
+
+```console
+'--kubelet-certificate-authority' is present
+```
+
+**Returned Value**:
+
+```console
+Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes Node. 
+--authorization-mode=Node,RBAC + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'Node' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, +for example `--authorization-mode=Node,RBAC`. 
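+
+Note that K3s does not run the apiserver as a static pod, so there is no kube-apiserver.yaml manifest to edit; apiserver flags are passed through on the `k3s server` command line instead. As a minimal sketch (assuming the `--kube-apiserver-arg` passthrough flag), the remediation above translates to:
+
+```bash
+# Pass the authorization mode through to the embedded kube-apiserver.
+# K3s already defaults to Node,RBAC, as the audit below shows.
+k3s server --kube-apiserver-arg="authorization-mode=Node,RBAC"
+```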
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'RBAC' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameters. +``` +--enable-admission-plugins=...,EventRateLimit,... 
+--admission-control-config-file= +``` + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'EventRateLimit' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. 
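+
+For a quick spot-check outside a full scan, something like the following sketch (which assumes K3s logs its apiserver flags to the systemd journal, as in the audits used throughout this document) flags any occurrence of AlwaysAdmit:
+
+```bash
+# Fail if AlwaysAdmit appears among the logged kube-apiserver flags.
+if journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -q 'AlwaysAdmit'; then
+  echo "FAIL: AlwaysAdmit is enabled"
+else
+  echo "PASS: AlwaysAdmit is not enabled"
+fi
+```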
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' does not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + +**Audit:** + +```bash +/bin/ps -ef | grep containerd | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' is present +``` + +**Returned Value**: + +```console +root 1616 1600 6 13:26 ? 00:01:28 containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd root 2318 1 0 13:27 ? 
00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id b41ec3297be4625c2406ad8b7b4f8b91cddd60850c420050c4c3273f809b3e7e -address /run/k3s/containerd/containerd.sock root 2341 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id e7999a65ae0a4e9969f32317ec48ae4f7071b62f92e5236696737973be77c2e1 -address /run/k3s/containerd/containerd.sock root 3199 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 90c4e63d6ee29d40a48c2fdaf2738c2472cba1139dde8a550466c452184f8528 -address /run/k3s/containerd/containerd.sock root 3923 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id be5f4b9bd1ed9239362b7000b47f353acb8bc8ca52a9c9145cba0e902ec1c4b9 -address /run/k3s/containerd/containerd.sock root 4559 1 0 13:28 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 04cd40ea6b6078797f177c902c89412c70e523ad2a687a62829bf1d16ff0e19c -address /run/k3s/containerd/containerd.sock root 4647 1 0 13:28 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 48f37a480315b6adce2d2a5c5d67a85412dd0ba7a2e82816434e0deb9fa75de9 -address /run/k3s/containerd/containerd.sock root 6610 1 0 13:47 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 1cf71c22f568468055e517ab363437c0e54e45274c64024d337cc5bcce66341d -address /run/k3s/containerd/containerd.sock +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... 
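+
+On K3s, the equivalent of editing the manifest is adding the plugin to the apiserver arguments, for example via the K3s configuration file (a sketch; `/etc/rancher/k3s/config.yaml` is assumed to be the active configuration file on your system):
+
+```bash
+# Append SecurityContextDeny to the admission plugins K3s passes to the
+# apiserver, then restart K3s for the change to take effect.
+cat >> /etc/rancher/k3s/config.yaml <<'EOF'
+kube-apiserver-arg:
+  - "enable-admission-plugins=NodeRestriction,SecurityContextDeny"
+EOF
+systemctl restart k3s
+```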
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'SecurityContextDeny' OR '--enable-admission-plugins' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. 
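+
+A one-line sanity check (again assuming journald logging, as in the audits) is to confirm that ServiceAccount is not listed among any disabled plugins:
+
+```bash
+# Prints FAIL only if ServiceAccount appears in --disable-admission-plugins.
+journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 \
+  | grep -q 'disable-admission-plugins=[^ ]*ServiceAccount' \
+  && echo "FAIL: ServiceAccount admission plugin is disabled" \
+  || echo "PASS: ServiceAccount admission plugin is active"
+```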
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. 
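+
+The journal-based pattern used throughout these audits can be wrapped in a small helper that pulls out a single flag and its value (a sketch; the `apiserver_flag` function name is illustrative, not part of K3s):
+
+```bash
+# Print the last logged value of a given kube-apiserver flag, if any.
+apiserver_flag() {
+  journalctl -D /var/log/journal -u k3s \
+    | grep 'Running kube-apiserver' | tail -n1 \
+    | tr ' ' '\n' | grep -- "--$1=" || echo "--$1 is not set"
+}
+apiserver_flag disable-admission-plugins
+```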
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.16 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... 
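+
+K3s enables NodeRestriction by default, as the audit below shows. If you override the plugin list with your own arguments, keep NodeRestriction in the value you pass, since the flag is handed to the apiserver as-is rather than merged with the default. For instance (a sketch using the assumed `--kube-apiserver-arg` passthrough):
+
+```bash
+# Keep NodeRestriction in the list when adding further admission plugins.
+k3s server --kube-apiserver-arg="enable-admission-plugins=NodeRestriction,EventRateLimit"
+```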
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.17 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. 
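+
+In the returned value below, K3s runs the apiserver on secure port 6444, behind the supervisor listener on 6443. A quick way to confirm the secure port is actually listening (a sketch assuming `ss` from iproute2 is available on the node):
+
+```bash
+# Confirm the apiserver's secure port (and the supervisor port) are listening.
+ss -tlnp | grep -E ':(6443|6444)'
+```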
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port' +``` + +**Expected Result**: + +```console +'--secure-port' is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.18 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. 
+--profiling=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling' +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.19 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example, +--audit-log-path=/var/log/apiserver/audit.log + +### 1.2.20 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxage parameter to 30 +or as an appropriate number of days, for example, +--audit-log-maxage=30 + +### 1.2.21 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the 
--audit-log-maxbackup parameter to 10 or to an appropriate
+value. For example,
+--audit-log-maxbackup=10
+
+### 1.2.22 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
+For example, to set it as 100 MB, --audit-log-maxsize=100
+
+### 1.2.24 Ensure that the --service-account-lookup argument is set to true (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the control plane node and set the below parameter.
+--service-account-lookup=true
+Alternatively, you can delete the --service-account-lookup parameter from this file so
+that the default takes effect.
+
+**Audit:**
+
+```bash
+journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep
+```
+
+**Expected Result**:
+
+```console
+'--service-account-lookup' is not present OR '--service-account-lookup' is present
+```
+
+**Returned Value**:
+
+```console
+Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+### 1.2.25 Ensure that the --service-account-key-file argument is set as appropriate (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --service-account-key-file parameter +to the public key file for service accounts. For example, +`--service-account-key-file=`. + +### 1.2.26 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate and key file parameters. +``` +--etcd-certfile= +--etcd-keyfile= +``` +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 1.2.29 +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +--etcd-certfile AND --etcd-keyfile +``` + +### 1.2.27 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the TLS certificate and private key file parameters. 
+``` +--tls-cert-file= +--tls-private-key-file= +``` + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2 +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` + +### 1.2.28 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the client certificate authority file. 
+`--client-ca-file=` + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file' +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.29 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate authority file parameter. 
+`--etcd-cafile=`
+
+**Audit:**
+
+```bash
+journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'
+```
+
+**Expected Result**:
+
+```console
+'--etcd-cafile' is present
+```
+
+**Returned Value**:
+
+```console
+Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+### 1.2.30 Ensure that the --encryption-provider-config argument is set as appropriate (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and configure an EncryptionConfig file.
+Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+For example, `--encryption-provider-config=`
+
+**Audit:**
+
+```bash
+journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'
+```
+
+### 1.2.31 Ensure that encryption providers are appropriately configured (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and configure an EncryptionConfig file.
+In this file, choose aescbc, kms or secretbox as the encryption provider.
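+For illustration, a minimal EncryptionConfiguration using the aescbc provider might look like the sketch below. The key material shown is a placeholder, not anything generated by K3s; on K3s, secrets encryption at rest is normally enabled with the server's `--secrets-encryption` flag rather than by writing this file by hand.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1
+kind: EncryptionConfiguration
+resources:
+  - resources:
+      - secrets
+    providers:
+      # New Secrets are written with the aescbc key below.
+      - aescbc:
+          keys:
+            - name: key1
+              secret: <base64-encoded 32-byte key>
+      # identity allows Secrets written before encryption was enabled to be read.
+      - identity: {}
+```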
+
+**Audit:**
+
+```bash
+grep aescbc /path/to/encryption-config.json
+```
+
+### 1.2.32 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the control plane node and set the below parameter.
+```
+--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+```
+
+**Audit:**
+
+```bash
+journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'
+```
+
+## 1.3 Controller Manager
+### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
+on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+for example, --terminated-pod-gc-threshold=10
+
+**Audit:**
+
+```bash
+journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'
+```
+
+### 1.3.2 Ensure that the --profiling argument is set to false (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
+on the control plane node and set the below parameter.
+--profiling=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling' +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node to set the below parameter. 
+--use-service-account-credentials=true + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials' +``` + +**Expected Result**: + +```console +'--use-service-account-credentials' is not equal to 'false' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --service-account-private-key-file parameter +to the private key file for service accounts. For example, +`--service-account-private-key-file=`. 
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file' +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --root-ca-file parameter to the certificate bundle file. 
+`--root-ca-file=` + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file' +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep containerd | grep -v grep +``` + +**Expected Result**: + +```console +'--bind-address' is present OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 1616 1600 6 13:26 ? 00:01:28 containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd root 2318 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id b41ec3297be4625c2406ad8b7b4f8b91cddd60850c420050c4c3273f809b3e7e -address /run/k3s/containerd/containerd.sock root 2341 1 0 13:27 ? 
00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id e7999a65ae0a4e9969f32317ec48ae4f7071b62f92e5236696737973be77c2e1 -address /run/k3s/containerd/containerd.sock root 3199 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 90c4e63d6ee29d40a48c2fdaf2738c2472cba1139dde8a550466c452184f8528 -address /run/k3s/containerd/containerd.sock root 3923 1 0 13:27 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id be5f4b9bd1ed9239362b7000b47f353acb8bc8ca52a9c9145cba0e902ec1c4b9 -address /run/k3s/containerd/containerd.sock root 4559 1 0 13:28 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 04cd40ea6b6078797f177c902c89412c70e523ad2a687a62829bf1d16ff0e19c -address /run/k3s/containerd/containerd.sock root 4647 1 0 13:28 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 48f37a480315b6adce2d2a5c5d67a85412dd0ba7a2e82816434e0deb9fa75de9 -address /run/k3s/containerd/containerd.sock root 6610 1 0 13:47 ? 00:00:00 /var/lib/rancher/k3s/data/577968fa3d58539cc4265245941b7be688833e6bf5ad7869fa2afe02f15f1cd2/bin/containerd-shim-runc-v2 -namespace k8s.io -id 1cf71c22f568468055e517ab363437c0e54e45274c64024d337cc5bcce66341d -address /run/k3s/containerd/containerd.sock
+```
+
+## 1.4 Scheduler
+### 1.4.1 Ensure that the --profiling argument is set to false (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml
+on the control plane node and set the below parameter.
+--profiling=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address' +``` + +**Expected Result**: + +```console +'--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` + +## 2 Etcd Node Configuration +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. 
+``` +--cert-file= +--key-file= +``` + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.1 +``` + +**Expected Result**: + +```console +'cert-file' is present AND 'key-file' is present +``` + +**Returned Value**: + +```console +cert-file AND key-file cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key cert-file AND key-file +``` + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. 
+--client-cert-auth="true" + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.2 +``` + +**Expected Result**: + +```console +'--client-cert-auth' is present OR 'client-cert-auth' is equal to 'true' +``` + +**Returned Value**: + +```console +--client-cert-auth=true client-cert-auth: true --client-cert-auth=true +``` + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. 
+ --auto-tls=false + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.3 +``` + +**Expected Result**: + +```console +'ETCD_AUTO_TLS' is not present OR 'ETCD_AUTO_TLS' is present +``` + +**Returned Value**: + +```console +error: process ID list syntax error Usage: ps [options] Try 'ps --help ' or 'ps --help ' for additional help text. For more details see ps(1). cat: /proc//environ: No such file or directory error: process ID list syntax error Usage: ps [options] Try 'ps --help ' or 'ps --help ' for additional help text. For more details see ps(1). cat: /proc//environ: No such file or directory error: process ID list syntax error Usage: ps [options] Try 'ps --help ' or 'ps --help ' for additional help text. For more details see ps(1). cat: /proc//environ: No such file or directory +``` + +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameters. 
+```
+--peer-cert-file=
+--peer-key-file=
+```
+
+**Audit Script:** `check_for_k3s_etcd.sh`
+
+```bash
+#!/bin/bash
+
+# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3)
+# before it checks the requirement
+set -eE
+
+handle_error() {
+  echo "false"
+}
+
+trap 'handle_error' ERR
+
+
+if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then
+  case $1 in
+    "1.1.11")
+      echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);;
+    "1.2.29")
+      echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');;
+    "2.1")
+      echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');;
+    "2.2")
+      echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');;
+    "2.3")
+      echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);;
+    "2.4")
+      echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');;
+    "2.5")
+      echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');;
+    "2.6")
+      echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);;
+    "2.7")
+      echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);;
+  esac
+else
+# If another database is running, return whatever is required to pass the scan
+  case $1 in
+    "1.1.11")
+      echo "700";;
+    "1.2.29")
+      echo "--etcd-certfile AND --etcd-keyfile";;
+    "2.1")
+      echo "cert-file AND key-file";;
+    "2.2")
+      echo "--client-cert-auth=true";;
+    "2.3")
+      echo "false";;
+    "2.4")
+      echo "peer-cert-file AND peer-key-file";;
+    "2.5")
+      echo "--client-cert-auth=true";;
+    "2.6")
+      echo "--peer-auto-tls=false";;
+    "2.7")
+      echo "--trusted-ca-file";;
+  esac
+fi
+
+```
+
+**Audit Execution:**
+
+```bash
+./check_for_k3s_etcd.sh 2.4
+```
+
+**Expected Result**:
+
+```console
+'cert-file' is present AND 'key-file' is present
+```
+
+**Returned Value**:
+
+```console
+peer-cert-file AND peer-key-file cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key peer-cert-file AND peer-key-file
+```
+
+### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master
+node and set the below parameter.
+--peer-client-cert-auth=true + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.5 +``` + +**Expected Result**: + +```console +'--client-cert-auth' is present OR 'client-cert-auth' is equal to 'true' +``` + +**Returned Value**: + +```console +--client-cert-auth=true client-cert-auth: true --client-cert-auth=true +``` + +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --peer-auto-tls parameter or set it to false. 
+--peer-auto-tls=false + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.6 +``` + +**Expected Result**: + +```console +'--peer-auto-tls' is not present OR '--peer-auto-tls' is equal to 'false' +``` + +**Returned Value**: + +```console +--peer-auto-tls=false error: process ID list syntax error Usage: ps [options] Try 'ps --help ' or 'ps --help ' for additional help text. For more details see ps(1). cat: /proc//environ: No such file or directory --peer-auto-tls=false +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Manual) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameter. 
+`--trusted-ca-file=` + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.7 +``` + +**Expected Result**: + +```console +'trusted-ca-file' is present +``` + +**Returned Value**: + +```console +--trusted-ca-file trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt --trusted-ca-file +``` + +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Manual) + + +**Result:** warn + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file' +``` + +### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) + + +**Result:** warn + +**Remediation:** +Review the audit policy provided for the cluster and ensure that it covers +at least the following areas, +- Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. +- Modification of Pod and Deployment objects. +- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. +For most requests, minimally logging at the Metadata level is recommended +(the most basic level of logging). 
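+
+As a starting point, a minimal policy covering these areas might look like the sketch below. The rules and file path are illustrative assumptions, not a policy shipped with K3s, and should be adapted to the cluster's requirements.
+
+```yaml
+# Illustrative audit policy, e.g. saved as /var/lib/rancher/k3s/server/audit.yaml
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+  # Log Secret, ConfigMap, and TokenReview access at Metadata level only,
+  # so request and response bodies containing sensitive data are never recorded.
+  - level: Metadata
+    resources:
+      - group: ""
+        resources: ["secrets", "configmaps"]
+      - group: "authentication.k8s.io"
+        resources: ["tokenreviews"]
+  # Record workload changes and use of exec/port-forward/proxy subresources.
+  - level: Metadata
+    resources:
+      - group: ""
+        resources: ["pods", "pods/exec", "pods/portforward", "pods/proxy", "services/proxy"]
+      - group: "apps"
+        resources: ["deployments"]
+  # Everything else is logged at Metadata level as well.
+  - level: Metadata
+```
+
+Because K3s runs the apiserver embedded rather than from a static pod manifest, such a policy would be passed through server flags along the lines of `--kube-apiserver-arg=audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml` and `--kube-apiserver-arg=audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log`.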
+
+## 4.1 Worker Node Configuration Files
+### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example, chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+
+### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+
+### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chmod 644 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
+
+**Audit:**
+
+```bash
+stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
+```
+
+**Expected Result**:
+
+```console
+'permissions' is present OR '/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' is not present
+```
+
+**Returned Value**:
+
+```console
+644 644
+```
+
+### 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example, chown root:root /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'
+```
+
+**Expected Result**:
+
+```console
+'root:root' is present OR '/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' is not present
+```
+
+**Returned Value**:
+
+```console
+root:root root:root
+```
+
+### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chmod 644 /var/lib/rancher/k3s/agent/kubelet.kubeconfig
+
+**Audit:**
+
+```bash
+stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig
+```
+
+**Expected Result**:
+
+```console
+'644' is equal to '644'
+```
+
+**Returned Value**:
+
+```console
+644 644
+```
+
+### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chown root:root /var/lib/rancher/k3s/agent/kubelet.kubeconfig
+
+**Audit:**
+
+```bash
+stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig
+```
+
+**Expected Result**:
+
+```console
+'root:root' is equal to 'root:root'
+```
+
+**Returned Value**:
+
+```console
+root:root root:root
+```
+
+### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the following command to modify the file permissions of the
+--client-ca-file: `chmod 644 `
+
+**Audit:**
+
+```bash
+stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt
+```
+
+**Expected Result**:
+
+```console
+'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present
+```
+
+**Returned Value**:
+
+```console
+644 600
+```
+
+### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the following command to modify the ownership of the --client-ca-file:
+`chown root:root `.
+
+**Audit:**
+
+```bash
+stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt
+```
+
+**Expected Result**:
+
+```console
+'root:root' is equal to 'root:root'
+```
+
+**Returned Value**:
+
+```console
+root:root root:root
+```
+
+### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Run the following command (using the config file location identified in the Audit step)
+chmod 644 /var/lib/kubelet/config.yaml
+
+### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Run the following command (using the config file location identified in the Audit step)
+chown root:root /var/lib/kubelet/config.yaml
+
+## 4.2 Kubelet
+### 4.2.1 Ensure that the --anonymous-auth argument is set to false (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+`false`.
+If using executable arguments, edit the kubelet service file
+/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and
+set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+`--anonymous-auth=false`
+Based on your system, restart the kubelet service.
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi' +``` + +**Expected Result**: + +```console +'--anonymous-auth' is equal to 'false' +``` + +**Returned Value**: + +```console +--anonymous-auth=false Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi' +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +--authorization-mode=Webhook Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +`--client-ca-file=` +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi' +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt Sep 13 13:26:40 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:40Z" level=info msg="Running kube-apiserver --advertise-address=172.31.0.140 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' +``` + +**Expected Result**: + +```console +'--read-only-port' is equal to '0' OR '--read-only-port' is not present +``` + +**Returned Value**: + +```console +Sep 13 13:26:50 k3s-123-cis-pool2-98604672-hr9p5 k3s[1592]: time="2022-09-13T13:26:50Z" level=info msg="Running kubelet --address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=k3s-123-cis-pool2-98604672-hr9p5 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --node-labels=rke.cattle.io/machine=00c4e7a0-5497-4367-a70c-0b836757eae8 --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" Sep 13 13:26:44 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:44Z" level=info msg="Running kubelet --address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=k3s-123-cis-pool3-b403f678-bzdg5 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --node-labels=rke.cattle.io/machine=109d596c-89f5-4c10-8c7f-6b82a38edd8f --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) + + +**Result:** warn + +**Remediation:** +If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout' +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) + + +**Result:** warn + +**Remediation:** +If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC containerd +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `tlsCertFile` to the location +of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +``` +--tls-cert-file= +--tls-private-key-file= +``` +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +Sep 13 13:26:50 k3s-123-cis-pool2-98604672-hr9p5 k3s[1592]: time="2022-09-13T13:26:50Z" level=info msg="Running kubelet --address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=k3s-123-cis-pool2-98604672-hr9p5 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --node-labels=rke.cattle.io/machine=00c4e7a0-5497-4367-a70c-0b836757eae8 --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" Sep 13 13:26:44 k3s-123-cis-pool3-b403f678-bzdg5 k3s[1600]: time="2022-09-13T13:26:44Z" level=info msg="Running kubelet --address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=k3s-123-cis-pool3-b403f678-bzdg5 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --node-labels=rke.cattle.io/machine=109d596c-89f5-4c10-8c7f-6b82a38edd8f --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) + + +**Result:** Not Applicable + +**Remediation:** +If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. 
For example,
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+--feature-gates=RotateKubeletServerCertificate=true
+Based on your system, restart the kubelet service. For example:
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+or to a subset of these values.
+If using executable arguments, edit the kubelet service file
+/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and
+set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+Based on your system, restart the kubelet service. For example:
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+**Audit:**
+
+```bash
+/bin/ps -fC containerd
+```
+
+## 5.1 RBAC and Service Accounts
+### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+if they need this role or if they could use a role with fewer privileges.
+Where possible, first bind users to a lower privileged role and then remove the
+clusterrolebinding to the cluster-admin role:
+kubectl delete clusterrolebinding [name]
+
+### 5.1.2 Minimize access to secrets (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Where possible, remove get, list and watch access to Secret objects in the cluster.
+
+### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Where possible replace any use of wildcards in clusterroles and roles with specific
+objects or actions.
+
+### 5.1.4 Minimize access to create pods (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Where possible, remove create access to pod objects in the cluster.
+
+### 5.1.5 Ensure that default service accounts are not actively used. (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Create explicit service accounts wherever a Kubernetes workload requires specific access
+to the Kubernetes API server.
+Modify the configuration of each default service account to include this value:
+`automountServiceAccountToken: false`
+
+### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Modify the definition of pods and service accounts which do not need to mount service
+account tokens to disable it.
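+
+A minimal sketch of what that looks like in practice is below. The names (`app-sa`, the pod spec, the `nginx` image) are illustrative placeholders, not part of the benchmark; the relevant field is `automountServiceAccountToken: false`, which can be set on the ServiceAccount, the Pod, or both.
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: app-sa
+# Tokens are not mounted into pods using this account unless a pod opts in.
+automountServiceAccountToken: false
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: app
+spec:
+  serviceAccountName: app-sa
+  # Pod-level setting takes precedence over the ServiceAccount setting.
+  automountServiceAccountToken: false
+  containers:
+    - name: app
+      image: nginx
+```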
+ +### 5.1.7 Avoid use of system:masters group (Manual) + + +**Result:** warn + +**Remediation:** +Remove the system:masters group from all users in the cluster. + +### 5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove the impersonate, bind and escalate rights from subjects. + +## 5.2 Pod Security Standards +### 5.2.1 Ensure that the cluster has at least one active policy control mechanism in place (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that either Pod Security Admission or an external policy control system is in place +for every namespace which contains user workloads. + +### 5.2.2 Minimize the admission of privileged containers (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of privileged containers. + +### 5.2.3 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostPID` containers. + +### 5.2.4 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostIPC` containers. + +### 5.2.5 Minimize the admission of containers wishing to share the host network namespace (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostNetwork` containers. + +### 5.2.6 Minimize the admission of containers with allowPrivilegeEscalation (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + +### 5.2.7 Minimize the admission of root containers (Automated) + + +**Result:** warn + +**Remediation:** +Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` +or `MustRunAs` with the range of UIDs not including 0, is set. + +### 5.2.8 Minimize the admission of containers with the NET_RAW capability (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with the `NET_RAW` capability. + +### 5.2.9 Minimize the admission of containers with added capabilities (Automated) + + +**Result:** warn + +**Remediation:** +Ensure that `allowedCapabilities` is not present in policies for the cluster unless +it is set to an empty array. + +### 5.2.10 Minimize the admission of containers with capabilities assigned (Manual) + + +**Result:** warn + +**Remediation:** +Review the use of capabilities in applications running on your cluster. Where a namespace +contains applications which do not require any Linux capabilities to operate consider adding +a PSP which forbids the admission of containers which do not drop all capabilities. 
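+
+For clusters that rely on Pod Security Admission as the policy control mechanism from 5.2.1, most of the admission controls in this section map to namespace labels. A hedged example follows; `my-app` is a placeholder namespace, and the built-in `restricted` profile covers privileged containers, host namespaces, privilege escalation, root users, and added capabilities.
+
+```bash
+# Enforce the "restricted" Pod Security Standard on a workload namespace,
+# and also audit against it so violations are logged.
+kubectl label namespace my-app \
+  pod-security.kubernetes.io/enforce=restricted \
+  pod-security.kubernetes.io/audit=restricted
+```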
+
+### 5.2.11 Minimize the admission of Windows HostProcess containers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+
+### 5.2.12 Minimize the admission of HostPath volumes (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with `hostPath` volumes.
+
+### 5.2.13 Minimize the admission of containers which use HostPorts (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers which use `hostPort` sections.
+
+## 5.3 Network Policies and CNI
+### 5.3.1 Ensure that the CNI in use supports NetworkPolicies (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+If the CNI plugin in use does not support network policies, consideration should be given to
+making use of a different plugin, or finding an alternate mechanism for restricting traffic
+in the Kubernetes cluster.
+
+### 5.3.2 Ensure that all Namespaces have NetworkPolicies defined (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the documentation and create NetworkPolicy objects as you need them.
+
+## 5.4 Secrets Management
+### 5.4.1 Prefer using Secrets as files over Secrets as environment variables (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+If possible, rewrite application code to read Secrets from mounted secret files, rather than
+from environment variables.
+
+### 5.4.2 Consider external secret storage (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Refer to the Secrets management options offered by your cloud provider or a third-party
+secrets management solution.
+
+## 5.5 Extensible Admission Control
+### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and set up image provenance.
+
+## 5.7 General Policies
+### 5.7.1 Create administrative boundaries between resources using namespaces (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the documentation and create namespaces for objects in your deployment as you need
+them.
+
+### 5.7.2 Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+An example is as below:
+```yaml
+securityContext:
+  seccompProfile:
+    type: RuntimeDefault
+```
+
+### 5.7.3 Apply SecurityContext to your Pods and Containers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+Containers.
+
+### 5.7.4 The default namespace should not be used (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+resources and that all new resources are created in a specific namespace.
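+
+A hedged example of that workflow, with `team-a` as a placeholder namespace:
+
+```bash
+# Create a dedicated namespace and make it the default for the current
+# kubectl context, so new resources stop landing in "default".
+kubectl create namespace team-a
+kubectl config set-context --current --namespace=team-a
+```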
+
+
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.24.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.24.md
new file mode 100644
index 000000000..267188376
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.24.md
@@ -0,0 +1,2819 @@
+---
+title: CIS 1.24 Self Assessment Guide
+---
+
+## Overview
+
+This document is a companion to the [K3s security hardening guide](hardening-guide.md). The hardening guide provides prescriptive guidance for hardening a production installation of K3s, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the CIS Kubernetes Benchmark. It is to be used by K3s operators, security teams, auditors, and decision-makers.
+
+This guide is specific to the **v1.24** release line of K3s and the **v1.24** release of the CIS Kubernetes Benchmark.
+
+For more information about each control, including detailed descriptions and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.24. You can download the benchmark, after creating a free account, from the [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes).
+
+### Testing controls methodology
+
+Each control in the CIS Kubernetes Benchmark was evaluated against a K3s cluster that was configured according to the accompanying hardening guide.
+
+Where control audits differ from the original CIS benchmark, the audit commands specific to K3s are provided for testing.
+
+These are the possible results for each control:
+
+- **Pass** - The K3s cluster under test passed the audit outlined in the benchmark.
+- **Not Applicable** - The control is not applicable to K3s because of how it is designed to operate. The remediation section will explain why this is so.
+- **Warn** - The control is manual in the CIS benchmark and it depends on the cluster's use case or some other factor that must be determined by the cluster operator. These controls have been evaluated to ensure K3s does not prevent their implementation, but no further configuration or auditing of the cluster under test has been performed.
+
+This guide assumes that K3s is running as a systemd unit. Your installation may vary and will require you to adjust the "audit" commands to fit your scenario.
+
+## 1.1 Control Plane Node Configuration Files
+
+### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
+
+### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
+
+### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
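+
+This design can be confirmed directly on a server node: the control plane components run inside the single k3s process, and there is no kubeadm-style static pod manifest directory for them. A hedged spot check (not part of the benchmark):
+
+```bash
+# Expect a single k3s server process and no static pod manifests.
+pgrep -a k3s
+ls /etc/kubernetes/manifests 2>/dev/null || echo "no static pod manifests"
+```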
+
+### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
+
+### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
+
+### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
+
+### 1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
+
+### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
+
+### 1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks.
+
+### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks.
+
+### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+if [ "$(journalctl -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then
+  stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd
+else
+  echo "permissions=700"
+fi
+```
+
+**Expected Result:** permissions has permissions 700, expected 700 or more restrictive
+
+**Returned Value:**
+
+```console
+permissions=700
+```
+
+**Remediation:**
+
+On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+from the command 'ps -ef | grep etcd'.
+Run the below command (based on the etcd data directory found above). For example,
+`chmod 700 /var/lib/etcd`
+
+### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+For K3s, etcd is embedded within the k3s process. There is no separate etcd process.
+Therefore the etcd data directory ownership is managed by the k3s process and should be root:root.
+
+### 1.1.13 Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** INFO
+
+**Remediation:**
+Run the below command (based on the file location on your system) on the control plane node.
+For example, `chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig`
+
+### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'
+```
+
+**Expected Result:** 'root:root' is equal to 'root:root'
+
+**Returned Value:**
+
+```console
+root:root
+```
+
+**Remediation:**
+
+Run the below command (based on the file location on your system) on the control plane node.
+For example, `chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig`
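+
+As a convenience, the individual credential file checks in 1.1.13 through 1.1.18 can be spot-checked in one pass. The sketch below assumes the default K3s credential directory `/var/lib/rancher/k3s/server/cred`; adjust the path if your installation differs.
+
+```bash
+# Print mode and ownership for every kubeconfig K3s generated for its
+# components. Each line should show 600 and root:root on a hardened node.
+stat -c '%a %U:%G %n' /var/lib/rancher/k3s/server/cred/*.kubeconfig
+```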
+
+### 1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'
+```
+
+**Expected Result:** permissions has permissions 600, expected 600 or more restrictive
+
+**Returned Value:**
+
+```console
+permissions=600
+```
+
+**Remediation:**
+
+Run the below command (based on the file location on your system) on the control plane node.
+For example,
+`chmod 600 /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig`
+
+### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'
+```
+
+**Expected Result:** 'root:root' is present
+
+**Returned Value:**
+
+```console
+root:root
+```
+
+**Remediation:**
+
+Run the below command (based on the file location on your system) on the control plane node.
+For example,
+`chown root:root /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig`
+
+### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'
+```
+
+**Expected Result:** permissions has permissions 600, expected 600 or more restrictive
+
+**Returned Value:**
+
+```console
+permissions=600
+```
+
+**Remediation:**
+
+Run the below command (based on the file location on your system) on the control plane node.
+For example,
+`chmod 600 /var/lib/rancher/k3s/server/cred/controller.kubeconfig`
+
+### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+stat -c %U:%G /var/lib/rancher/k3s/server/tls
+```
+
+**Expected Result:** 'root:root' is equal to 'root:root'
+
+**Returned Value:**
+
+```console
+root:root
+```
+
+**Remediation:**
+
+Run the below command (based on the file location on your system) on the control plane node.
+For example,
+`chown root:root /var/lib/rancher/k3s/server/cred/controller.kubeconfig`
+
+### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+find /var/lib/rancher/k3s/server/tls | xargs stat -c %U:%G
+```
+
+**Expected Result:** 'root:root' is present
+
+**Returned Value:**
+
+```console
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+root:root
+```
+
+**Remediation:**
+
+Run the below command (based on the file location on your system) on the control plane node.
+For example,
+`chown -R root:root /etc/kubernetes/pki/`
+
+### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Run the below command (based on the file location on your system) on the master node.
+For example,
+`chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt`
+
+### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'
+```
+
+**Expected Result:** permissions has permissions 600, expected 600 or more restrictive
+
+**Returned Value:**
+
+```console
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+permissions=600
+```
+
+**Remediation:**
+
+Run the below command (based on the file location on your system) on the master node.
+For example,
+`chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key`
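+
+If you want a single sweep over the PKI material covered by 1.1.19 through 1.1.21, a sketch like the one below can be used. It assumes the default K3s TLS directory `/var/lib/rancher/k3s/server/tls`.
+
+```bash
+# List mode, ownership, and path for every file under the K3s TLS directory.
+# Keys should report 600; everything should be owned by root:root.
+find /var/lib/rancher/k3s/server/tls -type f -exec stat -c '%a %U:%G %n' {} +
+```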
+
+## 1.2 API Server
+
+### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'
+```
+
+**Expected Result:** '--anonymous-auth' is equal to 'false'
+
+**Returned Value:**
+
+```console
+Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+**Remediation:**
+
+By default, K3s sets the --anonymous-auth argument to false. If it is set to true,
+edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
+```
+kube-apiserver-arg:
+  - "anonymous-auth=true"
+```
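+
+To confirm the setting is effective rather than merely present on the command line, you can probe the API server as an unauthenticated client. This is an illustrative check, not part of the benchmark; it assumes the default API server port 6443 on the server node.
+
+```bash
+# With --anonymous-auth=false, an unauthenticated request should be
+# rejected with 401 Unauthorized instead of being served.
+curl -sk -o /dev/null -w '%{http_code}\n' https://127.0.0.1:6443/livez
+```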
+
+### 1.2.2 Ensure that the --token-auth-file parameter is not set (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1
+```
+
+**Expected Result:** '--token-auth-file' is not present
+
+**Returned Value:**
+
+```console
+Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+**Remediation:**
+
+Follow the documentation and configure alternate mechanisms for authentication.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
+```
+kube-apiserver-arg:
+  - "token-auth-file="
+```
+
+### 1.2.3 Ensure that the --DenyServiceExternalIPs is not set (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1
+```
+
+**Expected Result:** '--enable-admission-plugins' does not have 'DenyServiceExternalIPs' OR '--enable-admission-plugins' is not present
+
+**Returned Value:**
+
+```console
+Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+**Remediation:**
+
+By default, K3s does not set DenyServiceExternalIPs.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "enable-admission-plugins=DenyServiceExternalIPs"
+```
+
+### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated)
+
+**Result:** INFO
+
+**Remediation:**
+Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the control plane node and remove the --kubelet-https parameter.
+
+### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'
+```
+
+**Expected Result:** '--kubelet-client-certificate' is present AND '--kubelet-client-key' is present
+
+**Returned Value:**
+
+```console
+Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+**Remediation:**
+
+By default, K3s automatically provides the kubelet client certificate and key.
+They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key.
+If for some reason you need to provide your own certificate and key, you can set the
+below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+```
+kube-apiserver-arg:
+  - "kubelet-client-certificate="
+  - "kubelet-client-key="
+```
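+
+If you do override these paths, it is worth confirming what is actually on disk. The sketch below is not part of the benchmark; it assumes the default K3s certificate location shown above.
+
+```bash
+# Show the subject and validity window of the client certificate the
+# API server presents when connecting to kubelets.
+openssl x509 -in /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt \
+  -noout -subject -dates
+```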
+
+### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'
+```
+
+**Expected Result:** '--kubelet-certificate-authority' is present
+
+**Returned Value:**
+
+```console
+Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key"
+```
+
+**Remediation:**
+
+Follow the Kubernetes documentation and set up the TLS connection between
+the apiserver and kubelets. Then, edit the API server pod specification file
+/etc/kubernetes/manifests/kube-apiserver.yaml on the control plane node and set the
+--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+--kubelet-certificate-authority=<ca-string>
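+
+As an optional sanity check, you can verify that a node's kubelet serving certificate chains to the CA the API server is configured to trust. This sketch assumes the node runs both the K3s server and agent, so both default paths exist locally.
+
+```bash
+# serving-kubelet.crt is issued by the K3s server CA referenced in
+# --kubelet-certificate-authority, so verification should print OK.
+openssl verify -CAfile /var/lib/rancher/k3s/server/tls/server-ca.crt \
+  /var/lib/rancher/k3s/agent/serving-kubelet.crt
+```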
+ +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' does not have 'AlwaysAllow' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s does not set the --authorization-mode to AlwaysAllow. +If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. +``` +kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" +``` +
+ +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' has 'Node' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s sets the --authorization-mode to Node and RBAC. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, +ensure that you are not overriding authorization-mode. +
+ +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' has 'RBAC' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s sets the --authorization-mode argument to Node,RBAC.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml
and ensure that you are not overriding authorization-mode (see the example under 1.2.8).
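To read just the flag's value rather than the full command line, a filtered variant of the audit above (a sketch; assumes grep supports -o, as GNU grep does):
```bash
journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -o 'authorization-mode=[^ ]*'
```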
+ +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) + +**Result:** WARN + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. +``` +kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result:** '--enable-admission-plugins' does not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s does not include AlwaysAdmit in the --enable-admission-plugins argument.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the one below.
```
kube-apiserver-arg:
  - "enable-admission-plugins=AlwaysAdmit"
```
+ +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + +**Result:** WARN + +**Remediation:** +Permissive, per CIS guidelines, +"This setting could impact offline or isolated clusters, which have images pre-loaded and +do not have access to a registry to pull in-use images. This setting is not appropriate for +clusters which use this configuration." +Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. +``` +kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result:** '--enable-admission-plugins' has 'SecurityContextDeny' OR '--enable-admission-plugins' has 'PodSecurityPolicy' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

Edit the K3s config file /etc/rancher/k3s/config.yaml and set the
enable-admission-plugins parameter to include SecurityContextDeny,
unless PodSecurityPolicy is already in place.
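A sketch of the corresponding config.yaml entry, following the same pattern as the other admission plugin checks in this guide (the surrounding plugin list is elided):
```
kube-apiserver-arg:
  - "enable-admission-plugins=...,SecurityContextDeny,..."
```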
+ +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'ServiceAccount' +``` + +**Expected Result:** '--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s does not set the --disable-admission-plugins argument.
Follow the Kubernetes documentation and create ServiceAccount objects as per your environment (an illustrative manifest follows the snippet below).
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the one below.
```
kube-apiserver-arg:
  - "disable-admission-plugins=ServiceAccount"
```
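For illustration only, a minimal ServiceAccount manifest of the kind the guidance above refers to; the name and namespace are hypothetical, and disabling token automount is a common hardening choice rather than a K3s requirement:
```
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-app        # hypothetical name
  namespace: default
automountServiceAccountToken: false
```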
+ +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s does not set the --disable-admission-plugins argument.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the one below.
```
kube-apiserver-arg:
  - "disable-admission-plugins=...,NamespaceLifecycle,..."
```
+ +### 1.2.16 Ensure that the admission control plugin NodeRestriction is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result:** '--enable-admission-plugins' has 'NodeRestriction' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s includes NodeRestriction in the --enable-admission-plugins argument.
If you are using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins.
If you are, include NodeRestriction in the list.
```
kube-apiserver-arg:
  - "enable-admission-plugins=...,NodeRestriction,..."
```
+ +### 1.2.17 Ensure that the --secure-port argument is not set to 0 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port' +``` + +**Expected Result:** '--secure-port' is greater than 0 OR '--secure-port' is not present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s sets the secure port to 6444.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the one below.
```
kube-apiserver-arg:
  - "secure-port="
```
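As a quick sanity check that the apiserver is listening on a non-zero secure port, a sketch using the ss utility from iproute2 (per the audit output above, K3s binds the secure port on 127.0.0.1:6444):
```bash
sudo ss -tlnp | grep 6444
```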
+ +### 1.2.18 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling' +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s sets the --profiling argument to false.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the one below.
```
kube-apiserver-arg:
  - "profiling=true"
```
+ +### 1.2.19 Ensure that the --audit-log-path argument is set (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-path' +``` + +**Expected Result:** '--audit-log-path' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter
to a suitable path and file where you would like audit logs to be written. For example:
```
kube-apiserver-arg:
  - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
```
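Once the flag is in place, a simple sketch to confirm that events are actually being written (the path matches the example above):
```bash
sudo tail -n 5 /var/lib/rancher/k3s/server/logs/audit.log
```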
+ +### 1.2.20 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxage' +``` + +**Expected Result:** '--audit-log-maxage' is greater or equal to 30 + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
set the audit-log-maxage parameter to 30 or an appropriate number of days. For example:
```
kube-apiserver-arg:
  - "audit-log-maxage=30"
```
+ +### 1.2.21 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxbackup' +``` + +**Expected Result:** '--audit-log-maxbackup' is greater or equal to 10 + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
set the audit-log-maxbackup parameter to 10 or an appropriate value. For example:
```
kube-apiserver-arg:
  - "audit-log-maxbackup=10"
```
+ +### 1.2.22 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxsize' +``` + +**Expected Result:** '--audit-log-maxsize' is greater or equal to 100 + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
set the audit-log-maxsize parameter to 100 or an appropriate size in MB. For example:
```
kube-apiserver-arg:
  - "audit-log-maxsize=100"
```
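Checks 1.2.19 through 1.2.22 all configure the same audit log, so in practice the four flags are usually set together. A combined config.yaml sketch whose values mirror the audited defaults shown above:
```
kube-apiserver-arg:
  - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
  - "audit-log-maxage=30"
  - "audit-log-maxbackup=10"
  - "audit-log-maxsize=100"
```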
+ +### 1.2.23 Ensure that the --request-timeout argument is set as appropriate (Manual) + +**Result:** WARN + +**Remediation:** +Permissive, per CIS guidelines, +"it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". +Edit the K3s config file /etc/rancher/k3s/config.yaml +and set the below parameter if needed. For example, +``` +kube-apiserver-arg: + - "request-timeout=300s" +``` + +### 1.2.24 Ensure that the --service-account-lookup argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--service-account-lookup' is not present OR '--service-account-lookup' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

By default, K3s does not set the --service-account-lookup argument, so the upstream default of true takes effect.
Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup parameter explicitly. For example:
```
kube-apiserver-arg:
  - "service-account-lookup=true"
```
Alternatively, you can delete the service-account-lookup parameter from this file so
that the default takes effect.
+ +### 1.2.25 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-key-file' +``` + +**Expected Result:** '--service-account-key-file' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

K3s automatically generates and sets the service account key file.
It is located at /var/lib/rancher/k3s/server/tls/service.key.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the one below.
```
kube-apiserver-arg:
  - "service-account-key-file="
```
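To verify that the generated file exists and parses as a valid private key, a sketch assuming openssl is installed on the node:
```bash
sudo openssl pkey -in /var/lib/rancher/k3s/server/tls/service.key -noout \
  && echo "service.key is a valid private key"
```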
+ +### 1.2.26 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +if [ "$(journalctl -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 +else + echo "--etcd-certfile AND --etcd-keyfile" +fi +``` + +**Expected Result:** '--etcd-certfile' is present AND '--etcd-keyfile' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+**Remediation:**

K3s automatically generates and sets the etcd certificate and key files.
They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the ones below.
```
kube-apiserver-arg:
  - "etcd-certfile="
  - "etcd-keyfile="
```
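To inspect the generated client certificate's subject and validity window, a sketch with openssl (path taken from the audit output above):
```bash
sudo openssl x509 -in /var/lib/rancher/k3s/server/tls/etcd/client.crt -noout -subject -dates
```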
+ +### 1.2.27 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2 +``` + +**Expected Result:** '--tls-cert-file' is present AND '--tls-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+ +
+**Remediation:**

By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver.
They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the ones below.
```
kube-apiserver-arg:
  - "tls-cert-file="
  - "tls-private-key-file="
```
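To confirm that the serving certificate and private key actually belong together, a sketch that compares their public keys (assumes openssl and a bash shell for process substitution):
```bash
sudo bash -c 'diff \
  <(openssl x509 -in /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt -noout -pubkey) \
  <(openssl pkey -in /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key -pubout)' \
  && echo "certificate and key match"
```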
+ +### 1.2.28 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file' +``` + +**Expected Result:** '--client-ca-file' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s automatically provides the client certificate authority file. +It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-apiserver-arg: + - "client-ca-file=" +``` +
+ +### 1.2.29 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile' +``` + +**Expected Result:** '--etcd-cafile' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
By default, K3s automatically provides the etcd certificate authority file.
It is generated and located at /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
```
kube-apiserver-arg:
  - "etcd-cafile="
```

+ +### 1.2.30 Ensure that the --encryption-provider-config argument is set as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config' +``` + +**Expected Result:** '--encryption-provider-config' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
K3s can be configured to use encryption providers to encrypt secrets at rest.
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the parameter below:
`secrets-encryption: true`
Secrets encryption can then be managed with the `k3s secrets-encrypt` command line tool.
If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.

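For example, the current encryption state can be inspected with the `status` subcommand (run as root on a server node; exact output varies by K3s version):

```bash
# Show whether secrets encryption is enabled and which provider/key is active.
sudo k3s secrets-encrypt status
```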
+ +### 1.2.31 Ensure that encryption providers are appropriately configured (Manual) + +**Result:** PASS + +**Audit:** +```bash +ENCRYPTION_PROVIDER_CONFIG=$(journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') +if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi +``` + +**Expected Result:** 'provider' contains valid elements from 'aescbc,kms,secretbox' + +
+Returned Value: + +```console +provider=aescbc +``` +
+ +
+Remediation:
+
K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider.
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the parameter below:
`secrets-encryption: true`
Secrets encryption can then be managed with the `k3s secrets-encrypt` command line tool.
If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.

+ +### 1.2.32 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites' +``` + +**Expected Result:** '--tls-cipher-suites' contains valid elements from 'TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regressions; ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following:
```
kube-apiserver-arg:
  - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
```

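A hedged way to spot-check the cipher policy from the node itself (assuming the apiserver is listening on 127.0.0.1:6444 as in the audit output above, and that `openssl` is installed): the handshake below should fail, because TLS_RSA_WITH_AES_128_CBC_SHA is excluded from the default suite list.

```bash
# Attempt a TLS 1.2 handshake with a cipher outside the configured list;
# a "handshake failure" alert here indicates the restriction is in effect.
openssl s_client -connect 127.0.0.1:6444 -tls1_2 -cipher 'AES128-SHA' </dev/null
```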
+ +## 1.3 Controller Manager + +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold' +``` + +**Expected Result:** '--terminated-pod-gc-threshold' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation:
+
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node
and set the --terminated-pod-gc-threshold argument to an appropriate threshold, for example:
```
kube-controller-manager-arg:
  - "terminated-pod-gc-threshold=10"
```

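Changes to /etc/rancher/k3s/config.yaml only take effect after the K3s service restarts. A minimal sketch for a systemd-based install (mirroring the restart steps shown in section 4.2; use k3s-agent.service on agent nodes):

```bash
# Reload unit definitions and restart K3s so the updated config.yaml is applied.
sudo systemctl daemon-reload
sudo systemctl restart k3s.service
```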
+ +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling' +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s sets the --profiling argument to false. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "profiling=true" +``` +
+ +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials' +``` + +**Expected Result:** '--use-service-account-credentials' is not equal to 'false' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s sets the --use-service-account-credentials argument to true. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "use-service-account-credentials=false" +``` +
+ +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file' +``` + +**Expected Result:** '--service-account-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s automatically provides the service account private key file. +It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "service-account-private-key-file=" +``` +
+ +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file' +``` + +**Expected Result:** '--root-ca-file' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s automatically provides the root CA file. +It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "root-ca-file=" +``` +
+ +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 +``` + +**Expected Result:** '--feature-gates' does not have 'RotateKubeletServerCertificate=false' OR '--feature-gates' is not present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation:
+
By default, K3s does not set the RotateKubeletServerCertificate feature gate.
If you have set this feature gate to false, you should remove that setting.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
```
kube-controller-manager-arg:
  - "feature-gates=RotateKubeletServerCertificate=false"
```

+ +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'bind-address' +``` + +**Expected Result:** '--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation:
+
By default, K3s sets the --bind-address argument to 127.0.0.1.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
```
kube-controller-manager-arg:
  - "bind-address="
```

+ +## 1.4 Scheduler + +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+ +
+Remediation: + +By default, K3s sets the --profiling argument to false. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-scheduler-arg: + - "profiling=true" +``` +
+ +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address' +``` + +**Expected Result:** '--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+ +
+Remediation:
+
By default, K3s sets the --bind-address argument to 127.0.0.1.
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
```
kube-scheduler-arg:
  - "bind-address="
```

+ +## 2 Etcd Node Configuration + +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.cert-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/server-client.crt' AND '.client-transport-security.key-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/server-client.key' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-ee1de912=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-ee1de912 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s generates cert and key files for etcd.
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
has not been modified to use custom cert and key files.

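To see what K3s generated, the client TLS block of the embedded etcd configuration can be inspected directly (an illustrative read-only check; requires root on the server node):

```bash
# Print the client-transport-security block of the K3s-generated etcd config.
# The file path comes from the remediation above.
sudo grep -A4 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config
```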
+ +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.client-cert-auth' is equal to 'true' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-ee1de912=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-ee1de912 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true.
If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
has not been modified to disable client certificate authentication.

+ +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.auto-tls' is present OR '.client-transport-security.auto-tls' is not present + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-ee1de912=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-ee1de912 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s does not set the --auto-tls parameter.
If this check fails, edit the etcd configuration file /var/lib/rancher/k3s/server/db/etcd/config on the server
node and either remove the --auto-tls parameter or set it to false:
client-transport-security:
  auto-tls: false

+ +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.cert-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt' AND '.peer-transport-security.key-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-ee1de912=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-ee1de912 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s generates peer cert and key files for etcd.
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
has not been modified to use custom peer cert and key files.

+ +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.client-cert-auth' is equal to 'true' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-ee1de912=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-ee1de912 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true.
If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
has not been modified to disable peer client certificate authentication.

+ +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.auto-tls' is present OR '.peer-transport-security.auto-tls' is not present + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-ee1de912=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-ee1de912 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
If this check fails, edit the etcd configuration file /var/lib/rancher/k3s/server/db/etcd/config on the server
node and either remove the --peer-auto-tls parameter or set it to false:
peer-transport-security:
  auto-tls: false

+ +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.trusted-ca-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-ee1de912=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-ee1de912 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
has not been modified to use a shared certificate authority.

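One way to sanity-check this (an illustrative sketch, not part of the benchmark audit; assumes `openssl` is installed) is to compare the subjects of the etcd peer CA and the main K3s server CA, which should differ:

```bash
# Print the subject of each CA; a unique etcd CA should not match the server CA.
openssl x509 -noout -subject -in /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt
openssl x509 -noout -subject -in /var/lib/rancher/k3s/server/tls/server-ca.crt
```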
+

## 4.1 Worker Node Configuration Files

### 4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.

### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.

### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)

**Result:** PASS

**Audit:**
```bash
/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'
```

**Expected Result:** permissions has permissions 600, expected 600 or more restrictive

+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:
+
Run the below command (based on the file location on your system) on each worker node.
For example,
`chmod 600 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig`

+ +### 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation:
+
Run the below command (based on the file location on your system) on each worker node.
For example, `chown root:root /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig`

+ +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:
+
Run the below command (based on the file location on your system) on each worker node.
For example,
`chmod 600 /var/lib/rancher/k3s/agent/kubelet.kubeconfig`

+ +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation:
+
Run the below command (based on the file location on your system) on each worker node.
For example,
`chown root:root /var/lib/rancher/k3s/agent/kubelet.kubeconfig`

+ +### 4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c permissions=%a /var/lib/rancher/k3s/agent/client-ca.crt +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:
+
Run the following command to modify the file permissions of the --client-ca-file:
`chmod 600 /var/lib/rancher/k3s/agent/client-ca.crt`

+ +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/client-ca.crt +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the following command to modify the ownership of the --client-ca-file. +`chown root:root /var/lib/rancher/k3s/agent/client-ca.crt` +
+

### 4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.

### 4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.

## 4.2 Kubelet

### 4.2.1 Ensure that the --anonymous-auth argument is set to false (Automated)

**Result:** PASS

**Audit:**
```bash
/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'
```

**Expected Result:** '--anonymous-auth' is equal to 'false'

+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
By default, K3s sets the --anonymous-auth argument to false. If you have set this to a different value, you
should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
```
kubelet-arg:
  - "anonymous-auth=true"
```
If using the command line, edit the K3s service file and remove the below argument.
`--kubelet-arg="anonymous-auth=true"`
Based on your system, restart the k3s service. For example:
`systemctl daemon-reload`
`systemctl restart k3s.service`

+ +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi' +``` + +**Expected Result:** '--authorization-mode' does not have 'AlwaysAllow' + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
By default, K3s does not set the --authorization-mode argument to AlwaysAllow.
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
```
kubelet-arg:
  - "authorization-mode=AlwaysAllow"
```
If using the command line, edit the K3s service file and remove the below argument.
`--kubelet-arg="authorization-mode=AlwaysAllow"`
Based on your system, restart the k3s service. For example:
`systemctl daemon-reload`
`systemctl restart k3s.service`

+ +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi' +``` + +**Expected Result:** '--client-ca-file' is present + +
+Returned Value: + +```console +Aug 09 18:56:04 server-0 k3s[2366]: time="2024-08-09T18:56:04Z" level=info msg="Running kube-apiserver --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount,PodSecurityPolicy --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s automatically provides the client ca certificate for the Kubelet. +It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt +
+ +### 4.2.4 Verify that the --read-only-port argument is set to 0 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--read-only-port' is equal to '0' OR '--read-only-port' is not present + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you
+should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+```
+kubelet-arg:
+  - "read-only-port=XXXX"
+```
+If using the command line, edit the K3s service file and remove the below argument.
+```bash
+--kubelet-arg="read-only-port=XXXX"
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl daemon-reload
+systemctl restart k3s.service
+```
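+
+A minimal pass/fail variant of the audit above, assuming the same journald logging, might look like:
+
+```bash
+# PASS when the most recent kubelet invocation pins the read-only port to 0.
+if journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 \
+   | grep -q -- '--read-only-port=0'; then
+  echo "PASS: read-only-port is 0"
+else
+  echo "FAIL: read-only-port is not 0"
+fi
+```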
+ +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--streaming-connection-idle-timeout' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+```
+kubelet-arg:
+  - "streaming-connection-idle-timeout=5m"
+```
+If using the command line, run K3s with:
+```bash
+--kubelet-arg="streaming-connection-idle-timeout=5m"
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl restart k3s.service
+```
+ +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--protect-kernel-defaults' is equal to 'true' + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
+```
+protect-kernel-defaults: true
+```
+If using the command line, run K3s with:
+```bash
+--protect-kernel-defaults=true
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl restart k3s.service
+```
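+
+Note that with protect-kernel-defaults enabled, the kubelet will fail to start if the host's kernel parameters differ from the values it expects. A sketch of the sysctl values commonly applied alongside this setting (the file path is illustrative; consult the hardening guide for the authoritative list):
+
+```
+# /etc/sysctl.d/90-kubelet.conf (illustrative path)
+vm.panic_on_oom=0
+vm.overcommit_memory=1
+kernel.panic=10
+kernel.panic_on_oops=1
+```
+
+Apply the values with `sysctl -p /etc/sysctl.d/90-kubelet.conf` before restarting K3s.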
+ +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--make-iptables-util-chains' is equal to 'true' OR '--make-iptables-util-chains' is not present + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
+```
+kubelet-arg:
+  - "make-iptables-util-chains=true"
+```
+If using the command line, run K3s with:
+```bash
+--kubelet-arg="make-iptables-util-chains=true"
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl restart k3s.service
+```
+
+### 4.2.8 Ensure that the --hostname-override argument is not set (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply
+with cloud providers that require this flag to ensure that the hostname matches the node name.
+
+### 4.2.9 Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1
+```
+
+**Expected Result:** '--event-qps' is equal to '0'
+
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s sets the event-qps to 0. Should you wish to change this,
+and you are using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+```
+kubelet-arg:
+  - "event-qps=<value>"
+```
+If using the command line, run K3s with:
+```bash
+--kubelet-arg="event-qps=<value>"
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl restart k3s.service
+```
+ +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--tls-cert-file' is present AND '--tls-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
+They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key.
+If for some reason you need to provide your own certificate and key, you can set the
+below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+```
+kubelet-arg:
+  - "tls-cert-file=<path/to/tls-cert-file>"
+  - "tls-private-key-file=<path/to/tls-private-key-file>"
+```
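+
+To confirm which serving certificate the kubelet actually loaded, and when it expires, something like the following works (a sketch assuming the default K3s agent paths above and that `openssl` is installed):
+
+```bash
+# Show the subject and validity window of the kubelet serving certificate (run as root).
+openssl x509 -in /var/lib/rancher/k3s/agent/serving-kubelet.crt -noout -subject -dates
+```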
+ +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--rotate-certificates' is present OR '--rotate-certificates' is not present + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
+If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
+If using the command line, remove the K3s flag:
+```bash
+--kubelet-arg="rotate-certificates"
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl restart k3s.service
+```
+ +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** 'RotateKubeletServerCertificate' is present OR 'RotateKubeletServerCertificate' is not present + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+If you have enabled this feature gate, you should remove it.
+If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gates=RotateKubeletServerCertificate parameter.
+If using the command line, remove the K3s flag:
+```bash
+--kubelet-arg="feature-gates=RotateKubeletServerCertificate"
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl restart k3s.service
+```
+ +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--tls-cipher-suites' contains valid elements from 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256' + +
+Returned Value: + +```console +Aug 09 18:56:06 server-0 k3s[2366]: time="2024-08-09T18:56:06Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+If using the K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tls-cipher-suites` to
+```
+kubelet-arg:
+  - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+```
+or to a subset of these values.
+If using the command line, add the K3s flag:
+```bash
+--kubelet-arg="tls-cipher-suites=<same values as above>"
+```
+Based on your system, restart the k3s service. For example,
+```bash
+systemctl restart k3s.service
+```
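+
+To review the configured cipher list without scanning the full flag string, the suites can be printed one per line (a sketch reusing the journald audit above):
+
+```bash
+# List each configured kubelet TLS cipher suite on its own line.
+journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 \
+  | grep -o 'tls-cipher-suites=[^ "]*' | cut -d= -f2 | tr ',' '\n'
+```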
+
+## 5.1 RBAC and Service Accounts
+
+### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+if they need this role or if they could use a role with fewer privileges.
+Where possible, first bind users to a lower privileged role and then remove the
+clusterrolebinding to the cluster-admin role:
+```bash
+kubectl delete clusterrolebinding [name]
+```
+
+### 5.1.2 Minimize access to secrets (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove get, list and watch access to Secret objects in the cluster.
+
+### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible replace any use of wildcards in clusterroles and roles with specific
+objects or actions.
+
+### 5.1.4 Minimize access to create pods (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove create access to pod objects in the cluster.
+
+### 5.1.5 Ensure that default service accounts are not actively used. (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Create explicit service accounts wherever a Kubernetes workload requires specific access
+to the Kubernetes API server.
+Modify the configuration of each default service account to include this value:
+```
+automountServiceAccountToken: false
+```
+
+### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Modify the definition of pods and service accounts which do not need to mount service
+account tokens to disable it.
+
+### 5.1.7 Avoid use of system:masters group (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Remove the system:masters group from all users in the cluster.
+
+### 5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove the impersonate, bind and escalate rights from subjects.
+
+## 5.2 Pod Security Standards
+
+### 5.2.1 Ensure that the cluster has at least one active policy control mechanism in place (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Ensure that either Pod Security Admission or an external policy control system is in place
+for every namespace which contains user workloads.
+
+### 5.2.2 Minimize the admission of privileged containers (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of privileged containers.
+
+### 5.2.3 Minimize the admission of containers wishing to share the host process ID namespace (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of `hostPID` containers.
+
+### 5.2.4 Minimize the admission of containers wishing to share the host IPC namespace (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of `hostIPC` containers.
+
+### 5.2.5 Minimize the admission of containers wishing to share the host network namespace (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of `hostNetwork` containers.
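+
+One built-in way to satisfy the 5.2.x controls above is Pod Security Admission, named in control 5.2.1. A minimal sketch, assuming the `restricted` profile suits the namespace's workloads (the namespace name is illustrative):
+
+```bash
+# Enforce (and warn on) the "restricted" Pod Security Standard for one namespace.
+kubectl label namespace example-workloads \
+  pod-security.kubernetes.io/enforce=restricted \
+  pod-security.kubernetes.io/warn=restricted
+```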
+
+### 5.2.6 Minimize the admission of containers with allowPrivilegeEscalation (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+
+### 5.2.7 Minimize the admission of root containers (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+or `MustRunAs` with the range of UIDs not including 0, is set.
+
+### 5.2.8 Minimize the admission of containers with the NET_RAW capability (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with the `NET_RAW` capability.
+
+### 5.2.9 Minimize the admission of containers with added capabilities (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+it is set to an empty array.
+
+### 5.2.10 Minimize the admission of containers with capabilities assigned (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Review the use of capabilities in applications running on your cluster. Where a namespace
+contains applications which do not require any Linux capabilities to operate, consider adding
+a PSP which forbids the admission of containers which do not drop all capabilities.
+
+### 5.2.11 Minimize the admission of Windows HostProcess containers (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+
+### 5.2.12 Minimize the admission of HostPath volumes (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with `hostPath` volumes.
+
+### 5.2.13 Minimize the admission of containers which use HostPorts (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers which use `hostPort` sections.
+
+## 5.3 Network Policies and CNI
+
+### 5.3.1 Ensure that the CNI in use supports NetworkPolicies (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+If the CNI plugin in use does not support network policies, consideration should be given to
+making use of a different plugin, or finding an alternate mechanism for restricting traffic
+in the Kubernetes cluster.
+
+### 5.3.2 Ensure that all Namespaces have NetworkPolicies defined (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Follow the documentation and create NetworkPolicy objects as you need them.
+
+## 5.4 Secrets Management
+
+### 5.4.1 Prefer using Secrets as files over Secrets as environment variables (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+If possible, rewrite application code to read Secrets from mounted secret files, rather than
+from environment variables.
+
+### 5.4.2 Consider external secret storage (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Refer to the Secrets management options offered by your cloud provider or a third-party
+secrets management solution.
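+
+As an illustration of control 5.4.1 above, the fragment below mounts a Secret as a read-only file instead of exposing it through an environment variable (all names are hypothetical):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secret-as-file-example
+spec:
+  containers:
+    - name: app
+      image: nginx
+      volumeMounts:
+        - name: app-credentials
+          mountPath: /etc/app/secrets
+          readOnly: true
+  volumes:
+    - name: app-credentials
+      secret:
+        secretName: app-credentials
+```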
+
+## 5.5 Extensible Admission Control
+
+### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Follow the Kubernetes documentation and set up image provenance.
+
+## 5.7 General Policies
+
+### 5.7.1 Create administrative boundaries between resources using namespaces (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Follow the documentation and create namespaces for objects in your deployment as you need
+them.
+
+### 5.7.2 Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+An example is shown below:
+```
+securityContext:
+  seccompProfile:
+    type: RuntimeDefault
+```
+
+### 5.7.3 Apply SecurityContext to your Pods and Containers (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+Containers.
+
+### 5.7.4 The default namespace should not be used (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+resources and that all new resources are created in a specific namespace.
+
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.7.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.7.md
new file mode 100644
index 000000000..ab5d6de25
--- /dev/null
+++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.7.md
@@ -0,0 +1,2828 @@
+---
+title: CIS 1.7 Self Assessment Guide
+---
+
+## Overview
+
+This document is a companion to the [K3s security hardening guide](hardening-guide.md). The hardening guide provides prescriptive guidance for hardening a production installation of K3s, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the CIS Kubernetes Benchmark. It is to be used by K3s operators, security teams, auditors, and decision-makers.
+
+This guide is specific to the **v1.25** release line of K3s and the **v1.7.1** release of the CIS Kubernetes Benchmark.
+
+For more information about each control, including detailed descriptions and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.7.1. You can download the benchmark, after creating a free account, from the [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes).
+
+### Testing controls methodology
+
+Each control in the CIS Kubernetes Benchmark was evaluated against a K3s cluster that was configured according to the accompanying hardening guide.
+
+Where control audits differ from the original CIS benchmark, the audit commands specific to K3s are provided for testing.
+
+These are the possible results for each control:
+
+- **Pass** - The K3s cluster under test passed the audit outlined in the benchmark.
+- **Not Applicable** - The control is not applicable to K3s because of how it is designed to operate. The remediation section will explain why this is so.
+- **Warn** - The control is manual in the CIS benchmark and it depends on the cluster's use case or some other factor that must be determined by the cluster operator.
These controls have been evaluated to ensure K3s does not prevent their implementation, but no further configuration or auditing of the cluster under test has been performed.
+
+This guide assumes that K3s is running as a systemd unit. Your installation may vary and will require you to adjust the "audit" commands to fit your scenario.
+
+:::note
+
+Only `scored` tests, also known as `automated` tests, are covered in this guide.
+:::
+
+## 1.1 Control Plane Node Configuration Files
+
+### 1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the API server within the k3s process. There is no API server pod specification file.
+
+### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the API server within the k3s process. There is no API server pod specification file.
+
+### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
+
+### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
+
+### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
+
+### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
+
+### 1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
+
+### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
+
+### 1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+By default, K3s sets the CNI file permissions to 644.
+Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored.
+If you modify your CNI configuration, ensure that the permissions are set to 600.
+For example, `chmod 600 /var/lib/cni/networks/<filename>`
+
+### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+ps -ef | grep containerd | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+```
+
+**Expected Result:** 'root:root' is present
+
+Returned Value: + +```console +root:root +root:root +root:root +root:root +root:root +root:root +root:root +``` +
+ +
+Remediation:
+
+Run the below command (based on the file location on your system) on the control plane node.
+For example,
+`chown root:root <path/to/file>`
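+
+Where many CNI files need fixing at once, one option (a sketch, assuming the default path used by the audit above) is:
+
+```bash
+# Recursively reset ownership of the CNI network state files to root:root.
+find /var/lib/cni/networks -type f -exec chown root:root {} +
+```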
+ +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +if [ "$(journalctl -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd +else + echo "permissions=700" +fi +``` + +**Expected Result:** permissions has permissions 700, expected 700 or more restrictive + +
+Returned Value: + +```console +permissions=700 +``` +
+ +
+Remediation: + +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). For example, +`chmod 700 /var/lib/rancher/k3s/server/db/etcd` +
+ +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + +**Result:** Not Applicable + +**Rationale:** + +For K3s, etcd is embedded within the k3s process. There is no separate etcd process. +Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. + +### 1.1.13 Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, `chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig` +
+ +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi' +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, `chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig` +
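+
+Permissions and ownership of the admin kubeconfig (controls 1.1.13 and 1.1.14) can be checked in one pass, a sketch using the same `stat` approach as the audits above:
+
+```bash
+# Prints e.g. "600 root:root" on a compliant node.
+stat -c '%a %U:%G' /var/lib/rancher/k3s/server/cred/admin.kubeconfig
+```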
+ +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chmod 600 /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig` +
+ +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chown root:root /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig` +
+ +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chmod 600 /var/lib/rancher/k3s/server/cred/controller.kubeconfig` +
+ +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/server/cred/controller.kubeconfig +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chown root:root /var/lib/rancher/k3s/server/cred/controller.kubeconfig` +
+ +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/server/tls +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chown -R root:root /var/lib/rancher/k3s/server/tls` +
+ +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual) + +**Result:** WARN + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +`chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the master node. +For example, +`chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key` +
+ +## 1.2 API Server + +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth' +``` + +**Expected Result:** '--anonymous-auth' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s sets the --anonymous-auth argument to false. If it is set to true, +edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. +``` +kube-apiserver-arg: + - "anonymous-auth=true" +``` +
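+
+A quick way to confirm the effective setting (a sketch reusing the journald audit above) is to isolate the flag from the last apiserver invocation; it should print `anonymous-auth=false`:
+
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 \
+  | grep -o 'anonymous-auth=[^ "]*'
+```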
+ +### 1.2.2 Ensure that the --token-auth-file parameter is not set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--token-auth-file' is not present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+Follow the documentation and configure alternate mechanisms for authentication.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
+```
+kube-apiserver-arg:
+  - "token-auth-file=<path/to/token/file>"
+```
+ +### 1.2.3 Ensure that the --DenyServiceExternalIPs is not set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--enable-admission-plugins' does not have 'DenyServiceExternalIPs' OR '--enable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s does not set DenyServiceExternalIPs. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. +``` +kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" +``` +
+ +### 1.2.4 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' +``` + +**Expected Result:** '--kubelet-client-certificate' is present AND '--kubelet-client-key' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s automatically provides the kubelet client certificate and key.
+They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key.
+If for some reason you need to provide your own certificate and key, you can set the
+below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+```
+kube-apiserver-arg:
+  - "kubelet-client-certificate="
+  - "kubelet-client-key="
+```
+
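+To confirm the auto-generated client credential is present and current, the certificate can be inspected with openssl (the path is the K3s default named above):
+```bash
+# Print the subject and validity window of the kubelet client certificate
+# that the apiserver presents to kubelets.
+sudo openssl x509 -in /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt -noout -subject -dates
+```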
+ +### 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' +``` + +**Expected Result:** '--kubelet-certificate-authority' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s automatically provides the kubelet CA cert file at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "kubelet-certificate-authority="
+```
+
+ +### 1.2.6 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' does not have 'AlwaysAllow' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the --authorization-mode to AlwaysAllow.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "authorization-mode=AlwaysAllow"
+```
+
+ +### 1.2.7 Ensure that the --authorization-mode argument includes Node (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' has 'Node' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s sets the --authorization-mode to Node and RBAC.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and
+ensure that you are not overriding authorization-mode.
+
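+If you do pass authorization-mode explicitly for some other reason, keep both default modes in the list. A minimal sketch of a safe override:
+```
+kube-apiserver-arg:
+  - "authorization-mode=Node,RBAC"
+```
+This also satisfies checks 1.2.6 and 1.2.8, since the list excludes AlwaysAllow and includes RBAC.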
+ +### 1.2.8 Ensure that the --authorization-mode argument includes RBAC (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' has 'RBAC' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s sets the --authorization-mode to Node and RBAC.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and
+ensure that you are not overriding authorization-mode.
+
+ +### 1.2.9 Ensure that the admission control plugin EventRateLimit is set (Manual) + +**Result:** WARN + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. +``` +kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" +``` + +### 1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result:** '--enable-admission-plugins' does not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "enable-admission-plugins=AlwaysAdmit"
+```
+
+
+### 1.2.11 Ensure that the admission control plugin AlwaysPullImages is set (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+This check is permissive. Per CIS guidelines,
+"This setting could impact offline or isolated clusters, which have images pre-loaded and
+do not have access to a registry to pull in-use images. This setting is not appropriate for
+clusters which use this configuration."
+Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter.
+```
+kube-apiserver-arg:
+  - "enable-admission-plugins=...,AlwaysPullImages,..."
+```
+
+### 1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+PodSecurityPolicy was removed in Kubernetes v1.25, so enabling it on K3s v1.25+ is not supported and will cause applications to fail unexpectedly.
+
+### 1.2.13 Ensure that the admission control plugin ServiceAccount is set (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1
+```
+
+**Expected Result:** '--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present
+
+
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s does not set the --disable-admission-plugins to anything. +Follow the documentation and create ServiceAccount objects as per your environment. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" +``` +
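+As a sketch of the "create ServiceAccount objects" step, a dedicated account can be created per workload instead of relying on the default account (the names below are illustrative):
+```bash
+# Create a dedicated ServiceAccount for a workload; reference it from the pod
+# spec via serviceAccountName rather than using the namespace default.
+kubectl create serviceaccount app-sa -n my-namespace
+```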
+ +### 1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s does not set the --disable-admission-plugins to anything. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." +``` +
+ +### 1.2.15 Ensure that the admission control plugin NodeRestriction is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result:** '--enable-admission-plugins' has 'NodeRestriction' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s sets the --enable-admission-plugins to NodeRestriction. +If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. +If you are, include NodeRestriction in the list. +``` +kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." +``` +
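+For example, an override that enables an additional plugin while preserving NodeRestriction might look like the following (AlwaysPullImages is illustrative; see 1.2.11 for its trade-offs):
+```
+kube-apiserver-arg:
+  - "enable-admission-plugins=NodeRestriction,AlwaysPullImages"
+```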
+
+### 1.2.16 Ensure that the --secure-port argument is not set to 0 - Note: This recommendation is obsolete and will be deleted per the consensus process (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'
+```
+
+**Expected Result:** '--secure-port' is greater than 0 OR '--secure-port' is not present
+
+
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s sets the secure port to 6444. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-apiserver-arg: + - "secure-port=" +``` +
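+To confirm the apiserver is actually listening on the secure port, a socket check works (6444 is the K3s default noted above):
+```bash
+# The kube-apiserver should be bound to the loopback secure port 6444.
+sudo ss -tlnp | grep 6444
+```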
+ +### 1.2.17 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling' +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s sets the --profiling argument to false. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-apiserver-arg: + - "profiling=true" +``` +
+ +### 1.2.18 Ensure that the --audit-log-path argument is set (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-path' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example, +``` +kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" +``` +
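+The new flag only takes effect once the service restarts. A quick way to apply and confirm, assuming the standard k3s systemd unit:
+```bash
+# Restart K3s so the apiserver picks up the new audit flags, then confirm
+# the audit log file is being written.
+sudo systemctl restart k3s
+sudo ls -l /var/lib/rancher/k3s/server/logs/audit.log
+```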
+ +### 1.2.19 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-maxage' is greater or equal to 30 + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
+set the audit-log-maxage parameter to 30 or to an appropriate number of days. For example,
+```
+kube-apiserver-arg:
+  - "audit-log-maxage=30"
+```
+
+ +### 1.2.20 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-maxbackup' is greater or equal to 10 + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and +set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example, +``` +kube-apiserver-arg: + - "audit-log-maxbackup=10" +``` +
+ +### 1.2.21 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-maxsize' is greater or equal to 100 + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and +set the audit-log-maxsize parameter to an appropriate size in MB. For example, +``` +kube-apiserver-arg: + - "audit-log-maxsize=100" +``` +
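+Checks 1.2.19 through 1.2.21 can be satisfied together. A combined example matching the defaults shown in the returned values above:
+```
+kube-apiserver-arg:
+  - "audit-log-maxage=30"
+  - "audit-log-maxbackup=10"
+  - "audit-log-maxsize=100"
+```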
+
+### 1.2.22 Ensure that the --request-timeout argument is set as appropriate (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+This check is permissive. Per CIS guidelines,
+"it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed".
+Edit the K3s config file /etc/rancher/k3s/config.yaml
+and set the below parameter if needed. For example,
+```
+kube-apiserver-arg:
+  - "request-timeout=300s"
+```
+
+### 1.2.23 Ensure that the --service-account-lookup argument is set to true (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1
+```
+
+**Expected Result:** '--service-account-lookup' is not present OR '--service-account-lookup' is present
+
+
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the --service-account-lookup argument.
+Edit the K3s config file /etc/rancher/k3s/config.yaml and set service-account-lookup to true. For example,
+```
+kube-apiserver-arg:
+  - "service-account-lookup=true"
+```
+Alternatively, you can delete the service-account-lookup parameter from this file so
+that the default takes effect.
+
+ +### 1.2.24 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--service-account-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+K3s automatically generates and sets the service account key file.
+It is located at /var/lib/rancher/k3s/server/tls/service.key.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "service-account-key-file="
+```
+
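+To confirm the auto-generated key is present and parseable, it can be checked with openssl (the path is the K3s default named above; a zero exit status means the key loads):
+```bash
+# Validate that the service account signing key parses as a private key.
+sudo openssl pkey -in /var/lib/rancher/k3s/server/tls/service.key -noout
+```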
+ +### 1.2.25 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +if [ "$(journalctl -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 +else + echo "--etcd-certfile AND --etcd-keyfile" +fi +``` + +**Expected Result:** '--etcd-certfile' is present AND '--etcd-keyfile' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +K3s automatically generates and sets the etcd certificate and key files. +They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" +``` +
+ +### 1.2.26 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2 +``` + +**Expected Result:** '--tls-cert-file' is present AND '--tls-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+ +
+Remediation:
+
+By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver.
+They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "tls-cert-file="
+  - "tls-private-key-file="
+```
+&#13;
+ +### 1.2.27 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file' +``` + +**Expected Result:** '--client-ca-file' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s automatically provides the client certificate authority file. +It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-apiserver-arg: + - "client-ca-file=" +``` +
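+
+As an informal sanity check (not part of the benchmark audit), you can inspect the generated client CA with openssl to confirm its subject and validity window:
+
+```bash
+# Print the subject and validity dates of the client CA certificate.
+sudo openssl x509 -in /var/lib/rancher/k3s/server/tls/client-ca.crt -noout -subject -dates
+```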
+ +### 1.2.28 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile' +``` + +**Expected Result:** '--etcd-cafile' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s automatically provides the etcd certificate authority file.
+It is generated and located at /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "etcd-cafile="
+```
+&#13;
+ +### 1.2.29 Ensure that the --encryption-provider-config argument is set as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config' +``` + +**Expected Result:** '--encryption-provider-config' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+K3s can be configured to use encryption providers to encrypt secrets at rest.
+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+```
+secrets-encryption: true
+```
+Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+&#13;
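+
+Once enabled, the state of at-rest encryption can be inspected with the secrets-encrypt tool, for example:
+
+```bash
+# Show whether at-rest secrets encryption is enabled and which provider is active.
+sudo k3s secrets-encrypt status
+```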
+ +### 1.2.30 Ensure that encryption providers are appropriately configured (Manual) + +**Result:** PASS + +**Audit:** +```bash +ENCRYPTION_PROVIDER_CONFIG=$(journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') +if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi +``` + +**Expected Result:** 'provider' contains valid elements from 'aescbc,kms,secretbox' + +
+Returned Value: + +```console +provider=aescbc +``` +
+ +
+Remediation:
+
+K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider.
+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+```
+secrets-encryption: true
+```
+Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+&#13;
+ +### 1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites' +``` + +**Expected Result:** '--tls-cipher-suites' contains valid elements from 'TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regressions; ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
+If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
+If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following:
+```
+kube-apiserver-arg:
+  - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+```
+&#13;
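+
+If you want to observe the cipher an actual client negotiates, one informal option (assuming you run it on the server node, where the apiserver listens on 127.0.0.1:6444) is openssl s_client. Note that TLS 1.3 cipher suites are negotiated independently of the --tls-cipher-suites flag:
+
+```bash
+# Open a TLS session to the local apiserver and report the negotiated protocol and cipher.
+openssl s_client -connect 127.0.0.1:6444 </dev/null 2>/dev/null | grep -E '^New,|Cipher'
+```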
+ +## 1.3 Controller Manager + +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold' +``` + +**Expected Result:** '--terminated-pod-gc-threshold' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation:
+
+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node
+and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
+```
+kube-controller-manager-arg:
+  - "terminated-pod-gc-threshold=10"
+```
+&#13;
+ +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling' +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s sets the --profiling argument to false. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "profiling=true" +``` +
+ +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials' +``` + +**Expected Result:** '--use-service-account-credentials' is not equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s sets the --use-service-account-credentials argument to true. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "use-service-account-credentials=false" +``` +
+ +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file' +``` + +**Expected Result:** '--service-account-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s automatically provides the service account private key file. +It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "service-account-private-key-file=" +``` +
+ +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file' +``` + +**Expected Result:** '--root-ca-file' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation: + +By default, K3s automatically provides the root CA file. +It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-controller-manager-arg: + - "root-ca-file=" +``` +
+ +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 +``` + +**Expected Result:** '--feature-gates' does not have 'RotateKubeletServerCertificate=false' OR '--feature-gates' is not present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the RotateKubeletServerCertificate feature gate, so the Kubernetes default (enabled) applies.
+This check only fails if the feature gate has been explicitly set to false.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-controller-manager-arg:
+  - "feature-gates=RotateKubeletServerCertificate=false"
+```
+&#13;
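+
+To see what, if anything, is currently set, you can extract the --feature-gates flag from the most recent controller manager start line in the journal; no output means the flag is absent, which also satisfies this check:
+
+```bash
+# Print the --feature-gates value from the latest kube-controller-manager invocation.
+journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep -o 'feature-gates=[^ ]*'
+```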
+ +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/ps -ef | grep containerd | grep -v grep +``` + +**Expected Result:** '--bind-address' is present OR '--bind-address' is not present + +
+Returned Value: + +```console +root 2372 2354 4 19:01 ? 00:00:05 containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd +root 3128 1 0 19:01 ? 00:00:00 /var/lib/rancher/k3s/data/0f1a87835be3817408b496b439fddb9ea54cab4298db472792bb1b1cbdc210bc/bin/containerd-shim-runc-v2 -namespace k8s.io -id 878d74b0d77d904ec40cd1db71956f2edeb68ab420227a5a42e6d25f249a140a -address /run/k3s/containerd/containerd.sock +root 3239 1 0 19:01 ? 00:00:00 /var/lib/rancher/k3s/data/0f1a87835be3817408b496b439fddb9ea54cab4298db472792bb1b1cbdc210bc/bin/containerd-shim-runc-v2 -namespace k8s.io -id d00cc363af40aee36210e396597e4c02712ae99535be21d204849dc33a22af88 -address /run/k3s/containerd/containerd.sock +root 3293 1 0 19:01 ? 00:00:00 /var/lib/rancher/k3s/data/0f1a87835be3817408b496b439fddb9ea54cab4298db472792bb1b1cbdc210bc/bin/containerd-shim-runc-v2 -namespace k8s.io -id 5df076fa9547c555a2231b9a9a7cbb44021eaa1ab68c9b59b13da960697143f6 -address /run/k3s/containerd/containerd.sock +root 4557 1 0 19:02 ? 00:00:00 /var/lib/rancher/k3s/data/0f1a87835be3817408b496b439fddb9ea54cab4298db472792bb1b1cbdc210bc/bin/containerd-shim-runc-v2 -namespace k8s.io -id f6483b71bcb7ea23356003921a7d90cf638b8f9e473728f3b28dc67163e0fa2d -address /run/k3s/containerd/containerd.sock +root 4644 1 0 19:02 ? 00:00:00 /var/lib/rancher/k3s/data/0f1a87835be3817408b496b439fddb9ea54cab4298db472792bb1b1cbdc210bc/bin/containerd-shim-runc-v2 -namespace k8s.io -id 4d8ceb2620c4e0501a49dc9192fc56d035e76bc79a2c6072fee8619730006233 -address /run/k3s/containerd/containerd.sock +``` +
+ +
+Remediation:
+
+By default, K3s sets the --bind-address argument to 127.0.0.1.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-controller-manager-arg:
+  - "bind-address="
+```
+&#13;
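+
+Independently of the benchmark audit, you can confirm that the controller manager only listens on the loopback interface by inspecting its secure port (10257 by default):
+
+```bash
+# The listener for port 10257 should be bound to 127.0.0.1.
+ss -tlnp | grep 10257
+```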
+ +## 1.4 Scheduler + +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+ +
+Remediation: + +By default, K3s sets the --profiling argument to false. +If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. +``` +kube-scheduler-arg: + - "profiling=true" +``` +
+ +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address' +``` + +**Expected Result:** '--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+ +
+Remediation:
+
+By default, K3s sets the --bind-address argument to 127.0.0.1.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-scheduler-arg:
+  - "bind-address="
+```
+&#13;
+ +## 2 Etcd Node Configuration + +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.cert-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/server-client.crt' AND '.client-transport-security.key-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/server-client.key' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-4a89bd20=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-4a89bd20 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
+If running with SQLite or an external DB, etcd checks are Not Applicable.
+When running with embedded etcd, K3s generates cert and key files for etcd.
+These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to use custom cert and key files.
+&#13;
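+
+The etcd checks below all read the generated etcd configuration file. A quick manual equivalent of their audits, assuming the default data directory, is to print the two TLS sections directly:
+
+```bash
+# Show the client and peer TLS settings from the embedded etcd config.
+sudo grep -A 4 -E 'client-transport-security|peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config
+```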
+ +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.client-cert-auth' is equal to 'true' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-4a89bd20=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-4a89bd20 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
+If running with SQLite or an external DB, etcd checks are Not Applicable.
+When running with embedded etcd, K3s sets the --client-cert-auth parameter to true.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to disable client certificate authentication.
+&#13;
+ +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.auto-tls' is present OR '.client-transport-security.auto-tls' is not present + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-4a89bd20=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-4a89bd20 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
+If running with SQLite or an external DB, etcd checks are Not Applicable.
+When running with embedded etcd, K3s does not set the --auto-tls parameter.
+If this check fails, edit the etcd configuration file /var/lib/rancher/k3s/server/db/etcd/config on the server
+node and either remove the auto-tls parameter or set it to false.
+```
+client-transport-security:
+  auto-tls: false
+```
+&#13;
+ +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.cert-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt' AND '.peer-transport-security.key-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-4a89bd20=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-4a89bd20 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
+If running with SQLite or an external DB, etcd checks are Not Applicable.
+When running with embedded etcd, K3s generates peer cert and key files for etcd.
+These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to use custom peer cert and key files.
+&#13;
+ +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.client-cert-auth' is equal to 'true' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-4a89bd20=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-4a89bd20 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
+If running with SQLite or an external DB, etcd checks are Not Applicable.
+When running with embedded etcd, K3s sets the --peer-client-cert-auth parameter to true.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to disable peer client certificate authentication.
+&#13;
+ +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.auto-tls' is present OR '.peer-transport-security.auto-tls' is not present + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-4a89bd20=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-4a89bd20 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
+If running with SQLite or an external DB, etcd checks are Not Applicable.
+When running with embedded etcd, K3s does not set the --peer-auto-tls parameter.
+If this check fails, edit the etcd configuration file /var/lib/rancher/k3s/server/db/etcd/config on the server
+node and either remove the peer auto-tls parameter or set it to false.
+```
+peer-transport-security:
+  auto-tls: false
+```
+&#13;
+ +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.trusted-ca-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-4a89bd20=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-4a89bd20 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:
+
+If running with SQLite or an external DB, etcd checks are Not Applicable.
+When running with embedded etcd, K3s generates a unique certificate authority for etcd.
+This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to use a shared certificate authority.
+&#13;
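+
+One informal way to verify the CAs are distinct is to compare the subject of the etcd peer CA with that of the cluster server CA; the two should differ:
+
+```bash
+# The two subjects below should not match.
+sudo openssl x509 -in /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt -noout -subject
+sudo openssl x509 -in /var/lib/rancher/k3s/server/tls/server-ca.crt -noout -subject
+```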
+
+## 4.1 Worker Node Configuration Files
+
+### 4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+
+### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+
+### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'
+```
+
+**Expected Result:** permissions has permissions 600, expected 600 or more restrictive
+
+&#13;
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:
+
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+`chmod 600 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig`
+&#13;
+ +### 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi' +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation:
+
+Run the below command (based on the file location on your system) on each worker node.
+For example, `chown root:root /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig`
+</details>
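+Checks 4.1.3 and 4.1.4 can also be remediated together; a minimal sketch, assuming the default
+K3s agent paths:
+
+```bash
+# Fix permissions and ownership of the kube-proxy kubeconfig in one pass
+f=/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
+if [ -e "$f" ]; then
+  chmod 600 "$f"
+  chown root:root "$f"
+fi
+```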
+ +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:
+
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+`chmod 600 /var/lib/rancher/k3s/agent/kubelet.kubeconfig`
+</details>
+ +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation:
+
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+`chown root:root /var/lib/rancher/k3s/agent/kubelet.kubeconfig`
+</details>
+ +### 4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c permissions=%a /var/lib/rancher/k3s/agent/client-ca.crt +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:
+
+Run the following command to modify the file permissions of the
+`--client-ca-file`: `chmod 600 /var/lib/rancher/k3s/agent/client-ca.crt`
+</details>
+ +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/client-ca.crt +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation:
+
+Run the following command to modify the ownership of the `--client-ca-file`:
+`chown root:root /var/lib/rancher/k3s/agent/client-ca.crt`
+</details>
+
+### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
+
+### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
+
+## 4.2 Kubelet
+
+### 4.2.1 Ensure that the --anonymous-auth argument is set to false (Automated)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'
+```
+
+**Expected Result:** '--anonymous-auth' is equal to 'false'
+
+<details>
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s sets the --anonymous-auth argument to false. If you have set this to a different value, you
+should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+```
+kubelet-arg:
+  - "anonymous-auth=true"
+```
+If using the command line, edit the K3s service file and remove the argument `--kubelet-arg="anonymous-auth=true"`.
+Based on your system, restart the k3s service. For example,
+`systemctl daemon-reload` and `systemctl restart k3s.service`.
+</details>
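+To verify the effective value after a restart, the flag can be extracted from the most recent
+startup line in the journal; a minimal sketch:
+
+```bash
+# Pull the anonymous-auth flag out of the last recorded kube-apiserver startup arguments
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -o -- '--anonymous-auth=[^ ]*'
+```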
+ +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi' +``` + +**Expected Result:** '--authorization-mode' does not have 'AlwaysAllow' + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the --authorization-mode to AlwaysAllow.
+If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+```
+kubelet-arg:
+  - "authorization-mode=AlwaysAllow"
+```
+If using the command line, edit the K3s service file and remove the argument `--kubelet-arg="authorization-mode=AlwaysAllow"`.
+Based on your system, restart the k3s service. For example,
+`systemctl daemon-reload` and `systemctl restart k3s.service`.
+</details>
+ +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi' +``` + +**Expected Result:** '--client-ca-file' is present + +
+Returned Value: + +```console +Aug 09 19:01:28 server-0 k3s[2354]: time="2024-08-09T19:01:28Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:
+
+By default, K3s automatically provides the client CA certificate for the Kubelet.
+It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt.
+</details>
+ +### 4.2.4 Verify that the --read-only-port argument is set to 0 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--read-only-port' is equal to '0' OR '--read-only-port' is not present + +
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s sets the --read-only-port argument to 0. If you have set this to a different value, you
+should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+```
+kubelet-arg:
+  - "read-only-port=XXXX"
+```
+If using the command line, edit the K3s service file and remove the argument `--kubelet-arg="read-only-port=XXXX"`.
+Based on your system, restart the k3s service. For example,
+`systemctl daemon-reload` and `systemctl restart k3s.service`.
+</details>
+ +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--streaming-connection-idle-timeout' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present + +
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+```
+kubelet-arg:
+  - "streaming-connection-idle-timeout=5m"
+```
+If using the command line, run K3s with `--kubelet-arg="streaming-connection-idle-timeout=5m"`.
+Based on your system, restart the k3s service. For example,
+`systemctl restart k3s.service`
+</details>
+ +### 4.2.6 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--make-iptables-util-chains' is equal to 'true' OR '--make-iptables-util-chains' is not present + +
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
+```
+kubelet-arg:
+  - "make-iptables-util-chains=true"
+```
+If using the command line, run K3s with `--kubelet-arg="make-iptables-util-chains=true"`.
+Based on your system, restart the k3s service. For example,
+`systemctl restart k3s.service`
+</details>
+
+### 4.2.7 Ensure that the --hostname-override argument is not set (Automated)
+
+**Result:** Not Applicable
+
+**Rationale:**
+
+By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply
+with cloud providers that require this flag to ensure that hostnames match node names.
+
+### 4.2.8 Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)
+
+**Result:** PASS
+
+**Audit:**
+```bash
+journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1
+```
+
+**Expected Result:** '--event-qps' is greater or equal to 0 OR '--event-qps' is not present
+
+<details>
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s sets the event-qps argument to 0. Should you wish to change this,
+set the following parameter in the K3s config file /etc/rancher/k3s/config.yaml to an appropriate value.
+```
+kubelet-arg:
+  - "event-qps="
+```
+If using the command line, run K3s with `--kubelet-arg="event-qps=<value>"`.
+Based on your system, restart the k3s service. For example,
+`systemctl restart k3s.service`
+</details>
+ +### 4.2.9 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--tls-cert-file' is present AND '--tls-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
+They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key.
+If for some reason you need to provide your own certificate and key, you can set
+the below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+```
+kubelet-arg:
+  - "tls-cert-file="
+  - "tls-private-key-file="
+```
+</details>
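+If you do supply your own certificate and key, the serving certificate can be sanity-checked with
+`openssl`, assuming it is installed:
+
+```bash
+# Show the subject and validity window of the kubelet serving certificate
+openssl x509 -in /var/lib/rancher/k3s/agent/serving-kubelet.crt -noout -subject -dates
+```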
+ +### 4.2.10 Ensure that the --rotate-certificates argument is not set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--rotate-certificates' is present OR '--rotate-certificates' is not present + +
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
+If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
+If using the command line, remove the K3s flag `--kubelet-arg="rotate-certificates"`.
+Based on your system, restart the k3s service. For example,
+`systemctl restart k3s.service`
+</details>
+ +### 4.2.11 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** 'RotateKubeletServerCertificate' is present OR 'RotateKubeletServerCertificate' is not present + +
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+If you have enabled this feature gate, you should remove it.
+If using the K3s config file /etc/rancher/k3s/config.yaml, remove any `feature-gate=RotateKubeletServerCertificate` parameter.
+If using the command line, remove the K3s flag `--kubelet-arg="feature-gate=RotateKubeletServerCertificate"`.
+Based on your system, restart the k3s service. For example,
+`systemctl restart k3s.service`
+</details>
+ +### 4.2.12 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--tls-cipher-suites' contains valid elements from 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256' + +
+Returned Value: + +```console +Aug 09 19:01:30 server-0 k3s[2354]: time="2024-08-09T19:01:30Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:
+
+If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set the TLS cipher suites as below
+```
+kubelet-arg:
+  - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+```
+or to a subset of these values.
+If using the command line, add the K3s flag `--kubelet-arg="tls-cipher-suites=<same values as above>"`.
+Based on your system, restart the k3s service. For example,
+`systemctl restart k3s.service`
+</details>
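+To confirm which cipher suites the kubelet actually offers, you can probe its TLS port from
+another host; one option, assuming `nmap` is available (`<node-ip>` is a placeholder):
+
+```bash
+# Enumerate the TLS cipher suites served on the kubelet port
+nmap --script ssl-enum-ciphers -p 10250 <node-ip>
+```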
+
+### 4.2.13 Ensure that a limit is set on pod PIDs (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Decide on an appropriate level for this parameter and set it.
+If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `podPidsLimit` to
+```
+kubelet-arg:
+  - "pod-max-pids="
+```
+
+## 5.1 RBAC and Service Accounts
+
+### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+if they need this role or if they could use a role with fewer privileges.
+Where possible, first bind users to a lower privileged role and then remove the
+clusterrolebinding to the cluster-admin role:
+`kubectl delete clusterrolebinding [name]`
+
+### 5.1.2 Minimize access to secrets (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove get, list and watch access to Secret objects in the cluster.
+
+### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, replace any use of wildcards in clusterroles and roles with specific
+objects or actions.
+
+### 5.1.4 Minimize access to create pods (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove create access to pod objects in the cluster.
+
+### 5.1.5 Ensure that default service accounts are not actively used (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Create explicit service accounts wherever a Kubernetes workload requires specific access
+to the Kubernetes API server.
+Modify the configuration of each default service account to include this value:
+`automountServiceAccountToken: false`
+
+### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Modify the definition of pods and service accounts which do not need to mount service
+account tokens to disable it.
+
+### 5.1.7 Avoid use of system:masters group (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Remove the system:masters group from all users in the cluster.
+
+### 5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove the impersonate, bind and escalate rights from subjects.
+
+### 5.1.9 Minimize access to create persistent volumes (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove create access to PersistentVolume objects in the cluster.
+
+### 5.1.10 Minimize access to the proxy sub-resource of nodes (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove access to the proxy sub-resource of node objects.
+
+### 5.1.11 Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
+
+### 5.1.12 Minimize access to webhook configuration objects (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
+
+### 5.1.13 Minimize access to the service account token creation (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Where possible, remove access to the token sub-resource of serviceaccount objects.
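+A practical starting point for the 5.1 reviews above is to enumerate which subjects hold
+cluster-admin and to opt default service accounts out of token auto-mounting; a minimal sketch
+(`<namespace>` is a placeholder):
+
+```bash
+# List ClusterRoleBindings that grant the cluster-admin role (5.1.1)
+kubectl get clusterrolebindings -o jsonpath='{range .items[?(@.roleRef.name=="cluster-admin")]}{.metadata.name}{"\n"}{end}'
+
+# Disable token auto-mounting on a default ServiceAccount (5.1.5, 5.1.6)
+kubectl patch serviceaccount default -n <namespace> -p '{"automountServiceAccountToken": false}'
+```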
+
+## 5.2 Pod Security Standards
+
+### 5.2.1 Ensure that the cluster has at least one active policy control mechanism in place (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Ensure that either Pod Security Admission or an external policy control system is in place
+for every namespace which contains user workloads.
+
+### 5.2.2 Minimize the admission of privileged containers (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of privileged containers.
+
+### 5.2.3 Minimize the admission of containers wishing to share the host process ID namespace (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of `hostPID` containers.
+
+### 5.2.4 Minimize the admission of containers wishing to share the host IPC namespace (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of `hostIPC` containers.
+
+### 5.2.5 Minimize the admission of containers wishing to share the host network namespace (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of `hostNetwork` containers.
+
+### 5.2.6 Minimize the admission of containers with allowPrivilegeEscalation (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+
+### 5.2.7 Minimize the admission of root containers (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+or `MustRunAs` with the range of UIDs not including 0, is set.
+
+### 5.2.8 Minimize the admission of containers with the NET_RAW capability (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with the `NET_RAW` capability.
+
+### 5.2.9 Minimize the admission of containers with added capabilities (Automated)
+
+**Result:** WARN
+
+**Remediation:**
+Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+it is set to an empty array.
+
+### 5.2.10 Minimize the admission of containers with capabilities assigned (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Review the use of capabilities in applications running on your cluster. Where a namespace
+contains applications which do not require any Linux capabilities to operate, consider adding
+a policy which forbids the admission of containers which do not drop all capabilities.
+
+### 5.2.11 Minimize the admission of Windows HostProcess containers (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+
+### 5.2.12 Minimize the admission of HostPath volumes (Manual)
+
+**Result:** WARN
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with `hostPath` volumes.
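+Many of the 5.2 controls in this section can be addressed with the built-in Pod Security
+Admission controller; a minimal sketch that places a workload namespace under the `restricted`
+profile (the namespace name `my-app` is an example):
+
+```bash
+# Enforce the restricted Pod Security Standard in a workload namespace
+kubectl label namespace my-app pod-security.kubernetes.io/enforce=restricted
+```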
+ +### 5.2.13 Minimize the admission of containers which use HostPorts (Manual) + +**Result:** WARN + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers which use `hostPort` sections. + +## 5.3 Network Policies and CNI + +### 5.3.1 Ensure that the CNI in use supports NetworkPolicies (Manual) + +**Result:** WARN + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + +### 5.3.2 Ensure that all Namespaces have NetworkPolicies defined (Manual) + +**Result:** WARN + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. + +## 5.4 Secrets Management + +### 5.4.1 Prefer using Secrets as files over Secrets as environment variables (Manual) + +**Result:** WARN + +**Remediation:** +If possible, rewrite application code to read Secrets from mounted secret files, rather than +from environment variables. + +### 5.4.2 Consider external secret storage (Manual) + +**Result:** WARN + +**Remediation:** +Refer to the Secrets management options offered by your cloud provider or a third-party +secrets management solution. + +## 5.5 Extensible Admission Control + +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + +**Result:** WARN + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + +## 5.7 General Policies + +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + +**Result:** WARN + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual) + +**Result:** WARN + +**Remediation:** +Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. +An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + +### 5.7.3 Apply SecurityContext to your Pods and Containers (Manual) + +**Result:** WARN + +**Remediation:** +Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a +suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Manual) + +**Result:** WARN + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.8.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.8.md new file mode 100644 index 000000000..3e6deb096 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/security/self-assessment-1.8.md @@ -0,0 +1,2836 @@ +--- +title: CIS 1.8 Self Assessment Guide +--- + +## Overview + +This document is a companion to the [K3s security hardening guide](hardening-guide.md). The hardening guide provides prescriptive guidance for hardening a production installation of K3s, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the CIS Kubernetes Benchmark. It is to be used by K3s operators, security teams, auditors, and decision-makers. 
+ +This guide is specific to the **v1.26-v1.29** release line of K3s and the **v1.8** release of the CIS Kubernetes Benchmark. + +For more information about each control, including detailed descriptions and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.8. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes). + +### Testing controls methodology + +Each control in the CIS Kubernetes Benchmark was evaluated against a K3s cluster that was configured according to the accompanying hardening guide. + +Where control audits differ from the original CIS benchmark, the audit commands specific to K3s are provided for testing. + +These are the possible results for each control: + +- **Pass** - The K3s cluster under test passed the audit outlined in the benchmark. +- **Not Applicable** - The control is not applicable to K3s because of how it is designed to operate. The remediation section will explain why this is so. +- **Warn** - The control is manual in the CIS benchmark and it depends on the cluster's use case or some other factor that must be determined by the cluster operator. These controls have been evaluated to ensure K3s does not prevent their implementation, but no further configuration or auditing of the cluster under test has been performed. + +This guide makes the assumption that K3s is running as a Systemd unit. Your installation may vary and will require you to adjust the "audit" commands to fit your scenario. + +## 1.1 Control Plane Node Configuration Files + +### 1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. + +### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds etcd within the k3s process. 
There is no etcd pod specification file. + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +``` +
+ +
+Remediation: + +By default, K3s sets the CNI file permissions to 600. +Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored. +If you modify your CNI configuration, ensure that the permissions are set to 600. +For example, `chmod 600 /var/lib/cni/networks/` +
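+To reset every CNI network file in one pass rather than one file at a time, a minimal sketch
+using the same path as the audit above:
+
+```bash
+# Reset permissions on all CNI network files, skipping the expected lock file
+find /var/lib/cni/networks -type f ! -name lock -exec chmod 600 {} +
+```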
+ +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + +**Result:** Not Applicable + +**Rationale:** + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chown root:root ` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +if [ "$(journalctl -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd +else + echo "permissions=700" +fi +``` + +**Expected Result:** permissions has permissions 700, expected 700 or more restrictive + +
+Returned Value: + +```console +permissions=700 +``` +
+ +
+Remediation: + +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). For example, +`chmod 700 /var/lib/etcd` +
+ +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + +**Result:** Not Applicable + +**Rationale:** + +For K3s, etcd is embedded within the k3s process. There is no separate etcd process. +Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. + +### 1.1.13 Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, `chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig` +
+ +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi' +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, `chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig` +
+ +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chmod 600 /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig` +
+ +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chown root:root /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig` +
+ +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chmod 600 /var/lib/rancher/k3s/server/cred/controller.kubeconfig` +
+ +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/server/cred/controller.kubeconfig +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chown root:root /var/lib/rancher/k3s/server/cred/controller.kubeconfig` +
+ +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/server/tls +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the control plane node. +For example, +`chown -R root:root /var/lib/rancher/k3s/server/tls` +
+ +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual) + +**Result:** WARN + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +`chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +permissions=600 +``` +
+ +
+Remediation: + +Run the below command (based on the file location on your system) on the master node. +For example, +`chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key` +
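+To re-audit both the certificate and key permissions (1.1.20 and 1.1.21) after remediation, a
+minimal sketch:
+
+```bash
+# Report the mode of every certificate and key under the K3s TLS directory
+find /var/lib/rancher/k3s/server/tls -name '*.crt' -o -name '*.key' | xargs stat -c '%a %n'
+```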
+ +## 1.2 API Server + +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth' +``` + +**Expected Result:** '--anonymous-auth' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s sets the --anonymous-auth argument to false.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "anonymous-auth=true"
+```
+
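+
+As a rough functional check (a sketch, assuming you are on the server node and the API server is reachable on the default port 6443), an unauthenticated request should now be rejected outright:
+
+```bash
+# With --anonymous-auth=false the API server returns 401 Unauthorized for
+# requests that carry no credentials (with anonymous auth enabled you would
+# typically see a 403 from RBAC instead).
+curl -sk -o /dev/null -w '%{http_code}\n' https://127.0.0.1:6443/api
+```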
+ +### 1.2.2 Ensure that the --token-auth-file parameter is not set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--token-auth-file' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+Follow the documentation and configure alternate mechanisms for authentication.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "token-auth-file="
+```
+
+ +### 1.2.3 Ensure that the --DenyServiceExternalIPs is not set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--enable-admission-plugins' does not have 'DenyServiceExternalIPs' OR '--enable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s does not enable the DenyServiceExternalIPs admission plugin.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "enable-admission-plugins=DenyServiceExternalIPs"
+```
+
+ +### 1.2.4 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--kubelet-client-certificate' is present AND '--kubelet-client-key' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s automatically provides the kubelet client certificate and key.
+They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key.
+If you need to provide your own certificate and key, you can set the
+below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+```
+kube-apiserver-arg:
+  - "kubelet-client-certificate="
+  - "kubelet-client-key="
+```
+
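+
+If you want to confirm what K3s generated before deciding to replace it, the certificate can be inspected directly (a minimal sketch, assuming `openssl` is available on the node):
+
+```bash
+# Show the subject and validity window of the kubelet client certificate
+# that the apiserver presents to kubelets.
+openssl x509 -in /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt -noout -subject -dates
+```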
+ +### 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' +``` + +**Expected Result:** '--kubelet-certificate-authority' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s automatically provides the kubelet CA certificate at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+If you need to provide your own CA certificate, look at using the k3s certificate command line tool.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "kubelet-certificate-authority="
+```
+
+ +### 1.2.6 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' does not have 'AlwaysAllow' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s does not set the --authorization-mode argument to AlwaysAllow.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "authorization-mode=AlwaysAllow"
+```
+
+ +### 1.2.7 Ensure that the --authorization-mode argument includes Node (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' has 'Node' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s sets the --authorization-mode argument to Node,RBAC.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and
+ensure that you are not overriding the authorization-mode argument.
+
+ +### 1.2.8 Ensure that the --authorization-mode argument includes RBAC (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result:** '--authorization-mode' has 'RBAC' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s sets the --authorization-mode argument to Node,RBAC.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and
+ensure that you are not overriding the authorization-mode argument.
+
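+
+For either of the two authorization-mode checks above, it can be handy to isolate just the flag value instead of reading the full command line (a sketch, assuming you can read the k3s unit logs):
+
+```bash
+# Extract only the authorization-mode flag from the most recent apiserver start.
+# The leading "--" stops grep from treating the pattern as an option.
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -oE -- '--authorization-mode=[^ ]+'
+# Expected output: --authorization-mode=Node,RBAC
+```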
+ +### 1.2.9 Ensure that the admission control plugin EventRateLimit is set (Manual) + +**Result:** WARN + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. +``` +kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" +``` + +### 1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result:** '--enable-admission-plugins' does not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s does not include AlwaysAdmit in the --enable-admission-plugins argument.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "enable-admission-plugins=AlwaysAdmit"
+```
+
+ +### 1.2.11 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + +**Result:** WARN + +**Remediation:** +Permissive, per CIS guidelines, +"This setting could impact offline or isolated clusters, which have images pre-loaded and +do not have access to a registry to pull in-use images. This setting is not appropriate for +clusters which use this configuration." +Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. +``` +kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." +``` + +### 1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + +**Result:** Not Applicable + +**Rationale:** + +Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail. + +### 1.2.13 Ensure that the admission control plugin ServiceAccount is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s does not set the --disable-admission-plugins argument.
+Follow the documentation and create ServiceAccount objects as per your environment.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "disable-admission-plugins=ServiceAccount"
+```
+
+ +### 1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s does not set the --disable-admission-plugins argument.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "disable-admission-plugins=...,NamespaceLifecycle,..."
+```
+
+ +### 1.2.15 Ensure that the admission control plugin NodeRestriction is set (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result:** '--enable-admission-plugins' has 'NodeRestriction' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s sets the --enable-admission-plugins argument to NodeRestriction.
+If you override the admission plugins in the K3s config file /etc/rancher/k3s/config.yaml,
+make sure that NodeRestriction remains in the list.
+```
+kube-apiserver-arg:
+  - "enable-admission-plugins=...,NodeRestriction,..."
+```
+
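+
+The same extraction trick works here if you want to see the effective plugin list at a glance (a sketch, using the default journald logging):
+
+```bash
+# Print only the enable-admission-plugins flag; NodeRestriction should appear in the list.
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -oE -- '--enable-admission-plugins=[^ ]+'
+```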
+ +### 1.2.16 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling' +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+By default, K3s sets the --profiling argument to false.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like the example below.
+```
+kube-apiserver-arg:
+  - "profiling=true"
+```
+
+ +### 1.2.17 Ensure that the --audit-log-path argument is set (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-path' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to the path of the
+file where you would like audit logs to be written. For example:
+```
+kube-apiserver-arg:
+  - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
+```
+
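+
+After restarting K3s you can verify that the log file is actually being written (a sketch, assuming the default path configured above):
+
+```bash
+# The file should exist and grow as API requests are audited.
+ls -l /var/lib/rancher/k3s/server/logs/audit.log
+# Each line is a JSON audit event; checking the most recent one is a quick liveness test.
+tail -n1 /var/lib/rancher/k3s/server/logs/audit.log
+```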
+ +### 1.2.18 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-maxage' is greater or equal to 30 + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
+set the audit-log-maxage parameter to 30 or to an appropriate number of days. For example:
+```
+kube-apiserver-arg:
+  - "audit-log-maxage=30"
+```
+
+ +### 1.2.19 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-maxbackup' is greater or equal to 10 + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
+set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example:
+```
+kube-apiserver-arg:
+  - "audit-log-maxbackup=10"
+```
+
+ +### 1.2.20 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--audit-log-maxsize' is greater or equal to 100 + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
+set the audit-log-maxsize parameter to an appropriate size in MB. For example:
+```
+kube-apiserver-arg:
+  - "audit-log-maxsize=100"
+```
+
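+
+Checks 1.2.18 through 1.2.20 can be verified together by pulling all of the audit-log flags out of the last apiserver invocation (a sketch, assuming journald logging):
+
+```bash
+# Print every audit-log-* flag currently in effect, one match per line.
+journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -oE -- '--audit-log-[a-z]+=[^ ]+'
+```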
+ +### 1.2.21 Ensure that the --request-timeout argument is set as appropriate (Manual) + +**Result:** WARN + +**Remediation:** +Permissive, per CIS guidelines, +"it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". +Edit the K3s config file /etc/rancher/k3s/config.yaml +and set the below parameter if needed. For example, +``` +kube-apiserver-arg: + - "request-timeout=300s" +``` + +### 1.2.22 Ensure that the --service-account-lookup argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--service-account-lookup' is not present OR '--service-account-lookup' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+
+
+Remediation:
+
+By default, K3s does not set the --service-account-lookup argument.
+Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup parameter. For example,
+```
+kube-apiserver-arg:
+  - "service-account-lookup=true"
+```
+Alternatively, you can delete the service-account-lookup parameter from this file so
+that the default takes effect.
+
+ +### 1.2.23 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 +``` + +**Expected Result:** '--service-account-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+
+
+Remediation:
+
+K3s automatically generates and sets the service account key file.
+It is located at /var/lib/rancher/k3s/server/tls/service.key.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "service-account-key-file="
+```
+
+ +### 1.2.24 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +if [ "$(journalctl -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 +else + echo "--etcd-certfile AND --etcd-keyfile" +fi +``` + +**Expected Result:** '--etcd-certfile' is present AND '--etcd-keyfile' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+
+
+Remediation:
+
+K3s automatically generates and sets the etcd certificate and key files.
+They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "etcd-certfile="
+  - "etcd-keyfile="
+```
+
+ +### 1.2.25 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2 +``` + +**Expected Result:** '--tls-cert-file' is present AND '--tls-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+
+
+Remediation:
+
+By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver.
+They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "tls-cert-file="
+  - "tls-private-key-file="
+```
+
+ +### 1.2.26 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file' +``` + +**Expected Result:** '--client-ca-file' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+
+
+Remediation:
+
+By default, K3s automatically provides the client certificate authority file.
+It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt.
+If you need to provide your own CA certificate, see the k3s certificate command line tool.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "client-ca-file="
+```
+
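+If you do decide to supply your own CA material, the k3s certificate tooling referenced above is the supported path; as a sketch (available flags and output vary by K3s release):
+
+```bash
+# Review the built-in CA rotation tooling before swapping in a custom CA
+sudo k3s certificate rotate-ca --help
+```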
+ +### 1.2.27 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile' +``` + +**Expected Result:** '--etcd-cafile' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+
+
+Remediation:
+
+By default, K3s automatically provides the etcd certificate authority file.
+It is generated and located at /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt.
+If you need to provide your own CA certificate, see the k3s certificate command line tool.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-apiserver-arg:
+  - "etcd-cafile="
+```
+
+ +### 1.2.28 Ensure that the --encryption-provider-config argument is set as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config' +``` + +**Expected Result:** '--encryption-provider-config' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+
+
+Remediation:
+
+K3s can be configured to use encryption providers to encrypt secrets at rest.
+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the parameter below.
+```
+secrets-encryption: true
+```
+Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+
+ +### 1.2.29 Ensure that encryption providers are appropriately configured (Manual) + +**Result:** PASS + +**Audit:** +```bash +ENCRYPTION_PROVIDER_CONFIG=$(journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') +if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi +``` + +**Expected Result:** 'provider' contains valid elements from 'aescbc,kms,secretbox' + +
+Returned Value: + +```console +provider=aescbc +``` +
+
+
+Remediation:
+
+K3s can be configured to use encryption providers to encrypt secrets at rest. K3s utilizes the aescbc provider.
+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the parameter below.
+```
+secrets-encryption: true
+```
+Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+
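+Once enabled, the active provider can be confirmed directly from the CLI (a sketch; exact output varies by K3s release):
+
+```bash
+# Show the current secrets-encryption state, including the active key type
+sudo k3s secrets-encrypt status
+```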
+ +### 1.2.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites' +``` + +**Expected Result:** '--tls-cipher-suites' contains valid elements from 'TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+
+
+Remediation:
+
+By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regressions, so ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
+If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
+If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following:
+```
+kube-apiserver-arg:
+  - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+```
+
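+To independently confirm which cipher the apiserver negotiates, a local probe works as a sketch (assumes openssl is installed on the server node, and that the secure port 6444 is bound to 127.0.0.1 as shown in the returned value above):
+
+```bash
+# Force a TLS 1.2 handshake against the apiserver and print the selected cipher
+openssl s_client -connect 127.0.0.1:6444 -tls1_2 </dev/null 2>/dev/null | grep -i 'cipher'
+```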
+ +## 1.3 Controller Manager + +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold' +``` + +**Expected Result:** '--terminated-pod-gc-threshold' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+
+
+Remediation:
+
+Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node
+and set terminated-pod-gc-threshold to an appropriate value. For example,
+```
+kube-controller-manager-arg:
+  - "terminated-pod-gc-threshold=10"
+```
+
+ +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling' +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+
+
+Remediation:
+
+By default, K3s sets the --profiling argument to false.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-controller-manager-arg:
+  - "profiling=true"
+```
+
+ +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials' +``` + +**Expected Result:** '--use-service-account-credentials' is not equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+
+
+Remediation:
+
+By default, K3s sets the --use-service-account-credentials argument to true.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-controller-manager-arg:
+  - "use-service-account-credentials=false"
+```
+
+ +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file' +``` + +**Expected Result:** '--service-account-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+
+
+Remediation:
+
+By default, K3s automatically provides the service account private key file.
+It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-controller-manager-arg:
+  - "service-account-private-key-file="
+```
+
+ +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file' +``` + +**Expected Result:** '--root-ca-file' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+
+
+Remediation:
+
+By default, K3s automatically provides the root CA file.
+It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+If you need to provide your own CA certificate, see the k3s certificate command line tool.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-controller-manager-arg:
+  - "root-ca-file="
+```
+
+ +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 +``` + +**Expected Result:** '--feature-gates' is present OR '--feature-gates' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+
+
+Remediation:
+
+By default, K3s does not set the RotateKubeletServerCertificate feature gate, so the Kubernetes default of true applies.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines that disable the gate, like below.
+```
+kube-controller-manager-arg:
+  - "feature-gates=RotateKubeletServerCertificate=false"
+```
+
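+As a quick verification in the same style as the audits above, confirm that no feature-gates override reaches the controller manager (no output means the Kubernetes defaults are in effect):
+
+```bash
+# Print any feature-gates argument passed to kube-controller-manager
+journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep -o 'feature-gates=[^ "]*'
+```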
+ +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-controller-manager' | tail -n1 +``` + +**Expected Result:** '--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --terminated-pod-gc-threshold=10 --use-service-account-credentials=true" +``` +
+
+
+Remediation:
+
+By default, K3s sets the --bind-address argument to 127.0.0.1.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-controller-manager-arg:
+  - "bind-address="
+```
+
+ +## 1.4 Scheduler + +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'profiling' +``` + +**Expected Result:** '--profiling' is equal to 'false' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+
+
+Remediation:
+
+By default, K3s sets the --profiling argument to false.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-scheduler-arg:
+  - "profiling=true"
+```
+
+ +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address' +``` + +**Expected Result:** '--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` +
+
+
+Remediation:
+
+By default, K3s sets the --bind-address argument to 127.0.0.1.
+If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+```
+kube-scheduler-arg:
+  - "bind-address="
+```
+
+ +## 2 Etcd Node Configuration + +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.cert-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/server-client.crt' AND '.client-transport-security.key-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/server-client.key' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +experimental-watch-progress-notify-interval: 5000000000 +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-11120bb0=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-11120bb0 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+
+
+Remediation:
+
+If running with SQLite or an external DB, the etcd checks are not applicable.
+When running with embedded etcd, K3s generates the cert and key files for etcd.
+These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to use custom cert and key files.
+
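+A quick spot-check of the generated file is enough here, since the config is plain YAML (a sketch; run as root on the server node):
+
+```bash
+# Print the client TLS block from the generated etcd config
+sudo grep -A4 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config
+```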
+ +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.client-cert-auth' is equal to 'true' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +experimental-watch-progress-notify-interval: 5000000000 +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-11120bb0=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-11120bb0 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+
+
+Remediation:
+
+If running with SQLite or an external DB, the etcd checks are not applicable.
+When running with embedded etcd, K3s sets the --client-cert-auth parameter to true.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to disable client certificate authentication.
+
+ +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.client-transport-security.auto-tls' is present OR '.client-transport-security.auto-tls' is not present + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +experimental-watch-progress-notify-interval: 5000000000 +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-11120bb0=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-11120bb0 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+
+
+Remediation:
+
+If running with SQLite or an external DB, the etcd checks are not applicable.
+When running with embedded etcd, K3s does not set the --auto-tls parameter.
+If this check fails, edit the etcd config file /var/lib/rancher/k3s/server/db/etcd/config on the control plane
+node and either remove the auto-tls parameter or set it to false:
+```
+client-transport-security:
+  auto-tls: false
+```
+
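+To verify, the following one-liner (a sketch) lists any auto-tls entries; no output means the parameter is unset, which satisfies this check and check 2.6 below:
+
+```bash
+# List any auto-tls settings in the generated etcd config
+sudo grep -n 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config
+```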
+ +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.cert-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt' AND '.peer-transport-security.key-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +experimental-watch-progress-notify-interval: 5000000000 +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-11120bb0=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-11120bb0 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+
+
+Remediation:
+
+If running with SQLite or an external DB, the etcd checks are not applicable.
+When running with embedded etcd, K3s generates the peer cert and key files for etcd.
+These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
+has not been modified to use custom peer cert and key files.
+
+ +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.client-cert-auth' is equal to 'true' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +experimental-watch-progress-notify-interval: 5000000000 +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-11120bb0=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-11120bb0 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:

If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true.
If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
has not been modified to disable peer client certificate authentication.
+ +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.auto-tls' is present OR '.peer-transport-security.auto-tls' is not present + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +experimental-watch-progress-notify-interval: 5000000000 +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-11120bb0=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-11120bb0 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:

If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
If this check fails, edit the etcd configuration file /var/lib/rancher/k3s/server/db/etcd/config on the master
node and either remove the --peer-auto-tls parameter or set it to false.
peer-transport-security:
  auto-tls: false
+ +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + +**Result:** PASS + +**Audit:** +```bash + +``` + +**Expected Result:** '.peer-transport-security.trusted-ca-file' is equal to '/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt' + +
+Returned Value: + +```console +advertise-client-urls: https://10.10.10.100:2379 +client-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt +data-dir: /var/lib/rancher/k3s/server/db/etcd +election-timeout: 5000 +experimental-initial-corrupt-check: true +experimental-watch-progress-notify-interval: 5000000000 +heartbeat-interval: 500 +initial-advertise-peer-urls: https://10.10.10.100:2380 +initial-cluster: server-0-11120bb0=https://10.10.10.100:2380 +initial-cluster-state: new +listen-client-http-urls: https://127.0.0.1:2382 +listen-client-urls: https://127.0.0.1:2379,https://10.10.10.100:2379 +listen-metrics-urls: http://127.0.0.1:2381 +listen-peer-urls: https://127.0.0.1:2380,https://10.10.10.100:2380 +log-outputs: +- stderr +logger: zap +name: server-0-11120bb0 +peer-transport-security: + cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt + client-cert-auth: true + key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key + trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +snapshot-count: 10000 +``` +
+ +
+Remediation:

If running with SQLite or an external DB, etcd checks are Not Applicable.
When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
If this check fails, ensure that the configuration file /var/lib/rancher/k3s/server/db/etcd/config
has not been modified to use a shared certificate authority.
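To verify by hand that etcd really uses its own CA rather than the cluster CA, compare the certificate subjects and fingerprints. A sketch assuming the default TLS directory:

```bash
# The etcd peer CA and the main cluster server CA should differ.
openssl x509 -noout -subject -fingerprint -in /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt
openssl x509 -noout -subject -fingerprint -in /var/lib/rancher/k3s/server/tls/server-ca.crt
```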
+

## 4.1 Worker Node Configuration Files

### 4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.

### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.

### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)

**Result:** PASS

**Audit:**
```bash
/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'
```

**Expected Result:** permissions has permissions 600, expected 600 or more restrictive
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:

Run the below command (based on the file location on your system) on each worker node.
For example,
`chmod 600 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig`
+ +### 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi' +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation:

Run the below command (based on the file location on your system) on each worker node.
For example, `chown root:root /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig`
+ +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation:

Run the below command (based on the file location on your system) on each worker node.
For example,
`chmod 600 /var/lib/rancher/k3s/agent/kubelet.kubeconfig`
+ +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig +``` + +**Expected Result:** 'root:root' is present + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation:

Run the below command (based on the file location on your system) on each worker node.
For example,
`chown root:root /var/lib/rancher/k3s/agent/kubelet.kubeconfig`
+ +### 4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c permissions=%a /var/lib/rancher/k3s/agent/client-ca.crt +``` + +**Expected Result:** permissions has permissions 600, expected 600 or more restrictive + +
+Returned Value: + +```console +permissions=600 +``` +
+ +
+Remediation: + +Run the following command to modify the file permissions of the +--client-ca-file `chmod 600 /var/lib/rancher/k3s/agent/client-ca.crt` +
+ +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + +**Result:** PASS + +**Audit:** +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/client-ca.crt +``` + +**Expected Result:** 'root:root' is equal to 'root:root' + +
+Returned Value: + +```console +root:root +``` +
+ +
+Remediation: + +Run the following command to modify the ownership of the --client-ca-file. +`chown root:root /var/lib/rancher/k3s/agent/client-ca.crt` +
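The file checks in 4.1.3 through 4.1.8 can also be spot-checked together. A small sketch using the default K3s agent paths referenced above:

```bash
# Report permissions and ownership for each audited agent file in one pass.
for f in /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig \
         /var/lib/rancher/k3s/agent/kubelet.kubeconfig \
         /var/lib/rancher/k3s/agent/client-ca.crt; do
  [ -e "$f" ] && stat -c '%a %U:%G %n' "$f"
done
# Expected output per file: 600 root:root <path>
```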
+

### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.

### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)

**Result:** Not Applicable

**Rationale:**

The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.

## 4.2 Kubelet

### 4.2.1 Ensure that the --anonymous-auth argument is set to false (Automated)

**Result:** PASS

**Audit:**
```bash
/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'
```

**Expected Result:** '--anonymous-auth' is equal to 'false'
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you +should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. +``` +kubelet-arg: + - "anonymous-auth=true" +``` +If using the command line, edit the K3s service file and remove the below argument. +--kubelet-arg="anonymous-auth=true" +Based on your system, restart the k3s service. For example, +systemctl daemon-reload +systemctl restart k3s.service +
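To confirm the flag the running kubelet actually received, you can pull it straight out of the journal. A sketch using the same journalctl source as the audits in this section:

```bash
# Extract the anonymous-auth flag from the most recent kubelet start line.
journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 \
  | tr ' ' '\n' | grep -- '--anonymous-auth'
# Expected output: --anonymous-auth=false
```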
+ +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi' +``` + +**Expected Result:** '--authorization-mode' does not have 'AlwaysAllow' + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation: + +By default, K3s does not set the --authorization-mode to AlwaysAllow. +If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. +``` +kubelet-arg: + - "authorization-mode=AlwaysAllow" +``` +If using the command line, edit the K3s service file and remove the below argument. +--kubelet-arg="authorization-mode=AlwaysAllow" +Based on your system, restart the k3s service. For example, +systemctl daemon-reload +systemctl restart k3s.service +
+ +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi' +``` + +**Expected Result:** '--client-ca-file' is present + +
+Returned Value: + +```console +Aug 09 19:06:17 server-0 k3s[2357]: time="2024-08-09T19:06:17Z" level=info msg="Running kube-apiserver --admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml --advertise-address=10.10.10.100 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --encryption-provider-config-automatic-reload=true --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` +
+ +
+Remediation:

By default, K3s automatically provides the client CA certificate for the Kubelet.
It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt.
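If you want to inspect the generated client CA, a quick openssl sketch:

```bash
# Show subject, issuer, and validity window of the kubelet client CA.
openssl x509 -noout -subject -issuer -dates -in /var/lib/rancher/k3s/agent/client-ca.crt
```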
+ +### 4.2.4 Verify that the --read-only-port argument is set to 0 (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--read-only-port' is equal to '0' OR '--read-only-port' is not present + +
+Returned Value: + +```console +Aug 09 19:06:19 server-0 k3s[2357]: time="2024-08-09T19:06:19Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --feature-gates=CloudDualStackNodeIPs=true --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation: + +By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you +should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. +``` +kubelet-arg: + - "read-only-port=XXXX" +``` +If using the command line, edit the K3s service file and remove the below argument. +--kubelet-arg="read-only-port=XXXX" +Based on your system, restart the k3s service. For example, +systemctl daemon-reload +systemctl restart k3s.service +
+ +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--streaming-connection-idle-timeout' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present + +
+Returned Value: + +```console +Aug 09 19:06:19 server-0 k3s[2357]: time="2024-08-09T19:06:19Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --feature-gates=CloudDualStackNodeIPs=true --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation: + +If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. +``` +kubelet-arg: + - "streaming-connection-idle-timeout=5m" +``` +If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". +Based on your system, restart the k3s service. For example, +systemctl restart k3s.service +
+ +### 4.2.6 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--make-iptables-util-chains' is equal to 'true' OR '--make-iptables-util-chains' is not present + +
+Returned Value: + +```console +Aug 09 19:06:19 server-0 k3s[2357]: time="2024-08-09T19:06:19Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --feature-gates=CloudDualStackNodeIPs=true --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation: + +If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. +``` +kubelet-arg: + - "make-iptables-util-chains=true" +``` +If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". +Based on your system, restart the k3s service. For example, +systemctl restart k3s.service +
+ +### 4.2.7 Ensure that the --hostname-override argument is not set (Automated) + +**Result:** Not Applicable + +**Rationale:** + +By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply +with cloud providers that require this flag to ensure that hostname matches node names. + +### 4.2.8 Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--event-qps' is greater or equal to 0 OR '--event-qps' is not present + +
+Returned Value: + +```console +Aug 09 19:06:19 server-0 k3s[2357]: time="2024-08-09T19:06:19Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --feature-gates=CloudDualStackNodeIPs=true --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:

By default, K3s sets the event-qps to 0. Should you wish to change this,
set the following parameter to an appropriate value in the K3s config file /etc/rancher/k3s/config.yaml.
```
kubelet-arg:
  - "event-qps="
```
If using the command line, run K3s with --kubelet-arg="event-qps=<value>".
Based on your system, restart the k3s service. For example,
systemctl restart k3s.service
+ +### 4.2.9 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--tls-cert-file' is present AND '--tls-private-key-file' is present + +
+Returned Value: + +```console +Aug 09 19:06:19 server-0 k3s[2357]: time="2024-08-09T19:06:19Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --feature-gates=CloudDualStackNodeIPs=true --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation:

By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key.
If for some reason you need to provide your own certificate and key, you can set the
below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
```
kubelet-arg:
  - "tls-cert-file="
  - "tls-private-key-file="
```
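If you do supply your own pair, it is worth confirming that the certificate and key actually match before restarting K3s. A sketch using the default paths (substitute your own files if you override them):

```bash
# The two hashes must be identical for a matching certificate/key pair.
openssl x509 -pubkey -noout -in /var/lib/rancher/k3s/agent/serving-kubelet.crt | sha256sum
openssl pkey -pubout -in /var/lib/rancher/k3s/agent/serving-kubelet.key | sha256sum
```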
+ +### 4.2.10 Ensure that the --rotate-certificates argument is not set to false (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '.rotateCertificates' is present OR '.rotateCertificates' is not present + +
+Returned Value: + +```console +apiVersion: v1 +clusters: +- cluster: + server: https://127.0.0.1:6443 + certificate-authority: /var/lib/rancher/k3s/agent/server-ca.crt + name: local +contexts: +- context: + cluster: local + namespace: default + user: user + name: Default +current-context: Default +kind: Config +preferences: {} +users: +- name: user + user: + client-certificate: /var/lib/rancher/k3s/agent/client-kubelet.crt + client-key: /var/lib/rancher/k3s/agent/client-kubelet.key +``` +
+ +
+Remediation: + +By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag. +If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter. +If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates". +Based on your system, restart the k3s service. For example, +systemctl restart k3s.service +
+ +### 4.2.11 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '.featureGates.RotateKubeletServerCertificate' is present OR '.featureGates.RotateKubeletServerCertificate' is not present + +
+Returned Value: + +```console +apiVersion: v1 +clusters: +- cluster: + server: https://127.0.0.1:6443 + certificate-authority: /var/lib/rancher/k3s/agent/server-ca.crt + name: local +contexts: +- context: + cluster: local + namespace: default + user: user + name: Default +current-context: Default +kind: Config +preferences: {} +users: +- name: user + user: + client-certificate: /var/lib/rancher/k3s/agent/client-kubelet.crt + client-key: /var/lib/rancher/k3s/agent/client-kubelet.key +``` +
+ +
+Remediation: + +By default, K3s does not set the RotateKubeletServerCertificate feature gate. +If you have enabled this feature gate, you should remove it. +If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter. +If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate". +Based on your system, restart the k3s service. For example, +systemctl restart k3s.service +
+ +### 4.2.12 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) + +**Result:** PASS + +**Audit:** +```bash +journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result:** '--tls-cipher-suites' contains valid elements from 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256' + +
+Returned Value: + +```console +Aug 09 19:06:19 server-0 k3s[2357]: time="2024-08-09T19:06:19Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --event-qps=0 --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --feature-gates=CloudDualStackNodeIPs=true --healthz-bind-address=127.0.0.1 --hostname-override=server-0 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-ip=10.10.10.100 --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` +
+ +
+Remediation: + +If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to +``` +kubelet-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" +``` +or to a subset of these values. +If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=<same values as above>" +Based on your system, restart the k3s service. For example, +systemctl restart k3s.service +
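To list exactly which suites the running kubelet was started with, a one-line sketch against the journal:

```bash
# Print each configured kubelet cipher suite on its own line.
journalctl -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1 \
  | grep -o 'tls-cipher-suites=[^ "]*' | tr ',' '\n'
```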
+

### 4.2.13 Ensure that a limit is set on pod PIDs (Manual)

**Result:** WARN

**Remediation:**
Decide on an appropriate level for this parameter and set it.
If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `podPidsLimit` to
```
kubelet-arg:
  - "pod-max-pids="
```

## 5.1 RBAC and Service Accounts

### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual)

**Result:** WARN

**Remediation:**
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
if they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the
clusterrolebinding to the cluster-admin role:
kubectl delete clusterrolebinding [name]
A script for enumerating these bindings is sketched after 5.1.13 below.

### 5.1.2 Minimize access to secrets (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove get, list and watch access to Secret objects in the cluster.

### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual)

**Result:** WARN

**Remediation:**
Where possible replace any use of wildcards in clusterroles and roles with specific
objects or actions.

### 5.1.4 Minimize access to create pods (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove create access to pod objects in the cluster.

### 5.1.5 Ensure that default service accounts are not actively used. (Manual)

**Result:** WARN

**Remediation:**
Create explicit service accounts wherever a Kubernetes workload requires specific access
to the Kubernetes API server.
Modify the configuration of each default service account to include this value
automountServiceAccountToken: false

### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual)

**Result:** WARN

**Remediation:**
Modify the definition of pods and service accounts which do not need to mount service
account tokens to disable it.

### 5.1.7 Avoid use of system:masters group (Manual)

**Result:** WARN

**Remediation:**
Remove the system:masters group from all users in the cluster.

### 5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove the impersonate, bind and escalate rights from subjects.

### 5.1.9 Minimize access to create persistent volumes (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove create access to PersistentVolume objects in the cluster.

### 5.1.10 Minimize access to the proxy sub-resource of nodes (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove access to the proxy sub-resource of node objects.

### 5.1.11 Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.

### 5.1.12 Minimize access to webhook configuration objects (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.

### 5.1.13 Minimize access to the service account token creation (Manual)

**Result:** WARN

**Remediation:**
Where possible, remove access to the token sub-resource of serviceaccount objects.
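As referenced in 5.1.1, here is a hedged sketch for enumerating which subjects are bound to cluster-admin. It assumes `kubectl` access to the cluster and the `jq` tool:

```bash
# List every ClusterRoleBinding that grants cluster-admin, with its subjects.
kubectl get clusterrolebindings -o json | jq -r '
  .items[]
  | select(.roleRef.name == "cluster-admin")
  | "\(.metadata.name): \([.subjects[]? | "\(.kind)/\(.name)"] | join(", "))"'
```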
+

## 5.2 Pod Security Standards

### 5.2.1 Ensure that the cluster has at least one active policy control mechanism in place (Manual)

**Result:** WARN

**Remediation:**
Ensure that either Pod Security Admission or an external policy control system is in place
for every namespace which contains user workloads. A namespace labeling sketch follows 5.2.12 below.

### 5.2.2 Minimize the admission of privileged containers (Manual)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of privileged containers.

### 5.2.3 Minimize the admission of containers wishing to share the host process ID namespace (Automated)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostPID` containers.

### 5.2.4 Minimize the admission of containers wishing to share the host IPC namespace (Automated)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostIPC` containers.

### 5.2.5 Minimize the admission of containers wishing to share the host network namespace (Automated)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostNetwork` containers.

### 5.2.6 Minimize the admission of containers with allowPrivilegeEscalation (Automated)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.

### 5.2.7 Minimize the admission of root containers (Automated)

**Result:** WARN

**Remediation:**
Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
or `MustRunAs` with a range of UIDs not including 0 is set.

### 5.2.8 Minimize the admission of containers with the NET_RAW capability (Automated)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with the `NET_RAW` capability.

### 5.2.9 Minimize the admission of containers with added capabilities (Automated)

**Result:** WARN

**Remediation:**
Ensure that `allowedCapabilities` is not present in policies for the cluster unless
it is set to an empty array.

### 5.2.10 Minimize the admission of containers with capabilities assigned (Manual)

**Result:** WARN

**Remediation:**
Review the use of capabilities in applications running on your cluster. Where a namespace
contains applications which do not require any Linux capabilities to operate, consider adding
a PSP which forbids the admission of containers which do not drop all capabilities.

### 5.2.11 Minimize the admission of Windows HostProcess containers (Manual)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.

### 5.2.12 Minimize the admission of HostPath volumes (Manual)

**Result:** WARN

**Remediation:**
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with `hostPath` volumes.
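As noted in 5.2.1, most of the checks in this section can be addressed with Pod Security Admission labels on each namespace that holds user workloads. A minimal sketch (the namespace name is an example):

```bash
# Enforce the "restricted" Pod Security Standard on an example namespace,
# auditing and warning at the same level.
kubectl label namespace my-app-namespace \
  pod-security.kubernetes.io/enforce=restricted \
  pod-security.kubernetes.io/audit=restricted \
  pod-security.kubernetes.io/warn=restricted
```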
+ +### 5.2.13 Minimize the admission of containers which use HostPorts (Manual) + +**Result:** WARN + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers which use `hostPort` sections. + +## 5.3 Network Policies and CNI + +### 5.3.1 Ensure that the CNI in use supports NetworkPolicies (Manual) + +**Result:** WARN + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + +### 5.3.2 Ensure that all Namespaces have NetworkPolicies defined (Manual) + +**Result:** WARN + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. + +## 5.4 Secrets Management + +### 5.4.1 Prefer using Secrets as files over Secrets as environment variables (Manual) + +**Result:** WARN + +**Remediation:** +If possible, rewrite application code to read Secrets from mounted secret files, rather than +from environment variables. + +### 5.4.2 Consider external secret storage (Manual) + +**Result:** WARN + +**Remediation:** +Refer to the Secrets management options offered by your cloud provider or a third-party +secrets management solution. + +## 5.5 Extensible Admission Control + +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + +**Result:** WARN + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + +## 5.7 General Policies + +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + +**Result:** WARN + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual) + +**Result:** WARN + +**Remediation:** +Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. +An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + +### 5.7.3 Apply SecurityContext to your Pods and Containers (Manual) + +**Result:** WARN + +**Remediation:** +Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a +suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Manual) + +**Result:** WARN + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/storage.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/storage.md new file mode 100644 index 000000000..dd20cf240 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/storage.md @@ -0,0 +1,170 @@ +--- +title: "Volumes e Armazenamento" +--- + +Ao implantar um aplicativo que precisa reter dados, você precisará criar um armazenamento persistente. O armazenamento persistente permite que você armazene dados do aplicativo externamente ao pod que está executando seu aplicativo. Essa prática de armazenamento permite que você mantenha os dados do aplicativo, mesmo se o pod do aplicativo falhar. 
+

Um volume persistente (PV) é uma parte do armazenamento no cluster do Kubernetes, enquanto uma reivindicação de volume persistente (PVC) é uma solicitação de armazenamento. Para obter detalhes sobre como os PVs e PVCs funcionam, consulte a documentação oficial do Kubernetes sobre [armazenamento](https://kubernetes.io/docs/concepts/storage/volumes/).

Esta página descreve como configurar o armazenamento persistente com um provedor de armazenamento local ou com o [Longhorn](#setting-up-longhorn).

## O que há de diferente no armazenamento do K3s?

O K3s remove vários plugins de volume opcionais e todos os provedores de nuvem integrados (às vezes chamados de "in-tree"). Fazemos isso para atingir um tamanho binário menor e evitar a dependência de tecnologias e serviços de nuvem ou data center de terceiros, que podem não estar disponíveis em muitos casos de uso do K3s. Podemos fazer isso porque sua remoção não afeta nem a funcionalidade principal do Kubernetes nem a conformidade.

Os seguintes plugins de volume foram removidos do K3s:

* cephfs
* fc
* flocker
* git_repo
* glusterfs
* portworx
* quobyte
* rbd
* storageos

Ambos os componentes têm alternativas fora da árvore que podem ser usadas com o K3s: a [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md) e a [Cloud Provider Interface (CPI)](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/) do Kubernetes.

Os mantenedores do Kubernetes estão migrando ativamente os plugins de volume in-tree para drivers CSI. Para mais informações sobre essa migração, consulte [aqui](https://kubernetes.io/blog/2021/12/10/storage-in-tree-to-csi-migration-status-update/).

## Configurando o Provedor de Armazenamento Local

O K3s vem com o Local Path Provisioner do Rancher, que permite criar reivindicações de volume persistente imediatamente, usando armazenamento local no respectivo nó. Abaixo, cobrimos um exemplo simples. Para obter mais informações, consulte a documentação oficial [aqui](https://github.com/rancher/local-path-provisioner/blob/master/README.md#usage).

Crie uma reivindicação de volume persistente com suporte do hostPath e um pod para utilizá-la:

### pvc.yaml

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-path-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 2Gi
```

### pod.yaml

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: volume-test
  namespace: default
spec:
  containers:
  - name: volume-test
    image: nginx:stable-alpine
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: volv
      mountPath: /data
    ports:
    - containerPort: 80
  volumes:
  - name: volv
    persistentVolumeClaim:
      claimName: local-path-pvc
```

Aplique o yaml:

```bash
kubectl create -f pvc.yaml
kubectl create -f pod.yaml
```

Confirme se o PV e o PVC foram criados:

```bash
kubectl get pv
kubectl get pvc
```

O status deve ser Bound para cada um.

## Configurando o Longhorn

:::warning

O Longhorn não suporta ARM32.

:::

O K3s oferece suporte ao [Longhorn](https://github.com/longhorn/longhorn), um sistema de armazenamento em bloco distribuído de código aberto para Kubernetes.

Abaixo cobrimos um exemplo simples. Para mais informações, consulte a [documentação oficial](https://longhorn.io/docs/latest/).
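Antes de aplicar o manifesto do Longhorn, vale conferir os pré-requisitos básicos em cada nó. Um esboço de pré-checagem (o Longhorn requer, entre outras coisas, o utilitário `iscsiadm`, do pacote open-iscsi, no host):

```bash
# Pré-checagem simples dos requisitos do Longhorn em um nó (esboço).
if command -v iscsiadm >/dev/null 2>&1; then
  echo "iscsiadm: OK"
else
  echo "iscsiadm ausente: instale o pacote open-iscsi antes de continuar"
fi
```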
+ +Aplique o longhorn.yaml para instalar o Longhorn: + +```bash +kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/deploy/longhorn.yaml +``` + +O Longhorn será instalado no namespace `longhorn-system`. + +Crie uma reivindicação de volume persistente e um pod para utilizá-lo: + +### pvc.yaml + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: longhorn-volv-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: longhorn + resources: + requests: + storage: 2Gi +``` + +### pod.yaml + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: volume-test + namespace: default +spec: + containers: + - name: volume-test + image: nginx:stable-alpine + imagePullPolicy: IfNotPresent + volumeMounts: + - name: volv + mountPath: /data + ports: + - containerPort: 80 + volumes: + - name: volv + persistentVolumeClaim: + claimName: longhorn-volv-pvc +``` + +Aplique o yaml para criar o PVC e o pod: + +```bash +kubectl create -f pvc.yaml +kubectl create -f pod.yaml +``` + +Confirme se o PV e o PVC foram criados: + +```bash +kubectl get pv +kubectl get pvc +``` + +O status deve ser Bound para cada um. diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/automated.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/automated.md new file mode 100644 index 000000000..4a1ee8bf0 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/automated.md @@ -0,0 +1,165 @@ +--- +title: "Atualizações Automatizadas" +--- + +### Visão Geral + +Você pode gerenciar atualizações de cluster do K3s usando o system-upgrade-controller do Rancher. Esta é uma abordagem nativa do Kubernetes para atualizações de cluster. Ela aproveita uma [definição de recurso personalizada (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#custom-resources), um `plan` e um [controlador](https://kubernetes.io/docs/concepts/architecture/controller/). + +O plano define políticas e requisitos de atualização. Ele também define quais nós devem ser atualizados por meio de um [seletor de rótulo](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). Veja abaixo os planos com padrões apropriados para atualizar um cluster K3s. Para opções de configuração de plano mais avançadas, revise o [CRD](https://github.com/rancher/system-upgrade-controller/blob/master/pkg/apis/upgrade.cattle.io/v1/types.go). + +O controlador agenda atualizações monitorando planos e selecionando nós para executar [jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) de atualização. Quando um job é executado até a conclusão com sucesso, o controlador rotula o nó no qual ele foi executado adequadamente. + +:::note +O trabalho de atualização que é iniciado deve ser altamente privilegiado. Ele é configurado com o seguinte: +- Namespaces do host `IPC`, `NET` e `PID` +- A capacidade `CAP_SYS_BOOT` +- Raiz do host montada em `/host` com permissões de leitura e gravação + +::: + + +Para automatizar atualizações dessa maneira, você deve fazer o seguinte: + +1. Instale o system-upgrade-controller em seu cluster +1. Configure os planos + +:::warning +Se o cluster K3s for gerenciado pelo Rancher, você deve usar a IU do Rancher para gerenciar atualizações. +- Se o cluster K3s foi importado para o Rancher, o Rancher gerenciará a implantação e os planos do system-upgrade-controller. Não siga as etapas desta página. 
+- Se o cluster K3s foi provisionado pelo Rancher, o Rancher usará o agente do sistema para gerenciar atualizações de versão. Não siga as etapas desta página. +- Se o cluster K3s *não* for gerenciado pelo Rancher, você pode seguir as etapas abaixo. +::: + +Para mais detalhes sobre o design e a arquitetura do system-upgrade-controller ou sua integração com o K3s, consulte os seguintes repositórios Git: + +- [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) +- [k3s-upgrade](https://github.com/k3s-io/k3s-upgrade) + +:::tip +Ao tentar atualizar para uma nova versão do K3s, a [política de distorção de versão do Kubernetes](https://kubernetes.io/releases/version-skew-policy/) se aplica. Certifique-se de que seu plano não pule versões secundárias intermediárias ao atualizar. O próprio system-upgrade-controller não protegerá contra alterações não suportadas na versão do Kubernetes. +::: + +### Instalação do system-upgrade-controller + +O system-upgrade-controller pode ser instalado como uma implantação no seu cluster. A implantação requer uma conta de serviço, clusterRoleBinding e um configmap. Para instalar esses componentes, execute o seguinte comando: + +```bash +kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/latest/download/system-upgrade-controller.yaml +``` +O controlador pode ser configurado e personalizado por meio do configmap mencionado anteriormente, mas o controlador deve ser reimplantado para que as alterações sejam aplicadas. + +Para poder aplicar os planos, o CRD do controlador de atualização do sistema precisa ser implantado: + +```bash +kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/latest/download/crd.yaml +``` + +### Configurar planos +É recomendável que você crie pelo menos dois planos: um plano para atualizar nós de servidor (plano de controle) e um plano para atualizar nós de agente. Você pode criar planos adicionais conforme necessário para controlar a implementação da atualização entre nós. Depois que os planos forem criados, o controlador os selecionará e começará a atualizar seu cluster. + +Os dois planos de exemplo a seguir atualizarão seu cluster para K3s v1.24.6+k3s1: + +```yaml +# Server plan +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: server-plan + namespace: system-upgrade +spec: + concurrency: 1 + cordon: true + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: In + values: + - "true" + serviceAccountName: system-upgrade + upgrade: + image: rancher/k3s-upgrade + version: v1.24.6+k3s1 +--- +# Agent plan +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: agent-plan + namespace: system-upgrade +spec: + concurrency: 1 + cordon: true + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist + prepare: + args: + - prepare + - server-plan + image: rancher/k3s-upgrade + serviceAccountName: system-upgrade + upgrade: + image: rancher/k3s-upgrade + version: v1.24.6+k3s1 +``` + +Há algumas coisas importantes a serem destacadas em relação a esses planos: + +1) Os planos devem ser criados no mesmo namespace em que o controlador foi implantado. + +2) O campo `concurrency` indica quantos nós podem ser atualizados ao mesmo tempo. + +3) O server-plan tem como alvo os nós do servidor especificando um seletor de rótulo que seleciona nós com o rótulo `node-role.kubernetes.io/control-plane`. 
O agent-plan tem como alvo os nós do agente especificando um seletor de rótulo que seleciona nós sem esse rótulo.

4) A etapa `prepare` no agent-plan fará com que os jobs de atualização desse plano aguardem a conclusão do server-plan antes de serem executados.

5) Ambos os planos têm o campo `version` definido como v1.24.6+k3s1. Como alternativa, você pode omitir o campo `version` e definir o campo `channel` como uma URL que resolva para uma versão do K3s. Isso fará com que o controlador monitore essa URL e atualize o cluster sempre que ela resolver para uma nova versão. Isso funciona bem com os [canais de versão](manual.md#release-channels). Assim, você pode configurar seus planos com o seguinte canal para garantir que seu cluster seja sempre atualizado automaticamente para a versão estável mais recente do K3s:
```yaml
apiVersion: upgrade.cattle.io/v1
kind: Plan
...
spec:
  ...
  channel: https://update.k3s.io/v1-release/channels/stable

```

Conforme declarado, a atualização começará assim que o controlador detectar que um plano foi criado. Atualizar um plano fará com que o controlador o reavalie e determine se outra atualização é necessária.

Você pode monitorar o progresso de uma atualização visualizando o plano e os jobs via kubectl:
```bash
kubectl -n system-upgrade get plans -o yaml
kubectl -n system-upgrade get jobs -o yaml
```


## Prevenção de Downgrade

:::info Nota de Versão
Começando com as versões de julho de 2023 ([v1.27.4+k3s1](https://github.com/k3s-io/k3s-upgrade/releases/tag/v1.27.4%2Bk3s1), [v1.26.7+k3s1](https://github.com/k3s-io/k3s-upgrade/releases/tag/v1.26.7%2Bk3s1), [v1.25.12+k3s1](https://github.com/k3s-io/k3s-upgrade/releases/tag/v1.25.12%2Bk3s1), [v1.24.16+k3s1](https://github.com/k3s-io/k3s-upgrade/releases/tag/v1.24.16%2Bk3s1))
:::

O Kubernetes não oferece suporte a downgrades de componentes do plano de controle. A imagem k3s-upgrade usada pelos planos de atualização se recusará a fazer downgrade do K3s, fazendo o plano falhar e deixando seus nós isolados (cordoned).

Aqui está um cluster de exemplo, mostrando pods de atualização com falha e nós isolados:

```console
ubuntu@user:~$ kubectl get pods -n system-upgrade
NAME                                                              READY   STATUS    RESTARTS   AGE
apply-k3s-server-on-ip-172-31-0-16-with-7af95590a5af8e8c3-2cdc6   0/1     Error     0          9m25s
apply-k3s-server-on-ip-172-31-10-23-with-7af95590a5af8e8c-9xvwg   0/1     Error     0          14m
apply-k3s-server-on-ip-172-31-13-213-with-7af95590a5af8e8-8j72v   0/1     Error     0          18m
system-upgrade-controller-7c4b84d5d9-kkzr6                        1/1     Running   0          20m
ubuntu@user:~$ kubectl get nodes
NAME               STATUS                     ROLES                       AGE   VERSION
ip-172-31-0-16     Ready,SchedulingDisabled   control-plane,etcd,master   19h   v1.27.4+k3s1
ip-172-31-10-23    Ready,SchedulingDisabled   control-plane,etcd,master   19h   v1.27.4+k3s1
ip-172-31-13-213   Ready,SchedulingDisabled   control-plane,etcd,master   19h   v1.27.4+k3s1
ip-172-31-2-13     Ready                      <none>                      19h   v1.27.4+k3s1
```
Você pode retornar seus nós isolados ao serviço por qualquer um dos seguintes métodos:
* Altere a versão ou o canal em seu plano para direcionar uma versão igual ou mais recente do que a que está em execução no cluster, para que o plano seja bem-sucedido.
* Exclua o plano e remova manualmente o isolamento dos nós, como no esboço abaixo.
Use `kubectl get plan -n system-upgrade` para encontrar o nome do plano e, em seguida, `kubectl delete plan -n system-upgrade PLAN_NAME` para excluí-lo. Depois que o plano for excluído, use `kubectl uncordon NODE_NAME` para remover o isolamento de cada um dos nós.
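
A título de ilustração, um esboço do segundo método. O nome de plano `server-plan` abaixo é apenas um exemplo; use o nome retornado pelo `kubectl get plan` no seu cluster:

```bash
# Liste os planos para descobrir o nome do plano com falha:
kubectl get plan -n system-upgrade

# Exclua o plano com falha (nome hipotético "server-plan"):
kubectl delete plan -n system-upgrade server-plan

# Remova o isolamento de cada nó que ficou com SchedulingDisabled
# (a coluna STATUS é o segundo campo da saída de "kubectl get nodes"):
for node in $(kubectl get nodes --no-headers | awk '$2 ~ /SchedulingDisabled/ {print $1}'); do
  kubectl uncordon "$node"
done
```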
diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/killall.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/killall.md new file mode 100644 index 000000000..559dd30c4 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/killall.md @@ -0,0 +1,73 @@ +--- +title: Parando K3s +---


Para permitir alta disponibilidade durante atualizações, os contêineres K3s continuam em execução quando o serviço K3s é interrompido.


## Serviço K3s

Parar e reiniciar o K3s é suportado pelo script de instalação do systemd e do OpenRC.




Parando o servidor:
```sh
sudo systemctl stop k3s
```

Reiniciando o servidor:
```sh
sudo systemctl start k3s
```

Parando os agentes:
```sh
sudo systemctl stop k3s-agent
```

Reiniciando os agentes:
```sh
sudo systemctl start k3s-agent
```




Parando o servidor:
```sh
sudo rc-service k3s stop
```

Reiniciando o servidor:
```sh
sudo rc-service k3s restart
```

Parando os agentes:
```sh
sudo rc-service k3s-agent stop
```

Reiniciando os agentes:
```sh
sudo rc-service k3s-agent restart
```





## Killall Script

Para parar todos os contêineres K3s e redefinir o estado do containerd, o script `k3s-killall.sh` pode ser usado.

O script killall limpa contêineres, diretórios K3s e componentes de rede, além de remover a cadeia iptables com todas as regras associadas. Os dados do cluster não serão excluídos.

Para executar o script killall em um nó de servidor, execute:

```bash
/usr/local/bin/k3s-killall.sh
```
\ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/manual.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/manual.md new file mode 100644 index 000000000..5fe73ef7a --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/manual.md @@ -0,0 +1,73 @@ +--- +title: "Atualizações Manuais" +---

Você pode atualizar o K3s usando o script de instalação ou instalando manualmente o binário da versão desejada.

:::note
Ao atualizar, atualize primeiro os nós de servidor, um de cada vez, e depois todos os nós de agente.
:::

### Canais de Lançamento

As atualizações realizadas por meio do script de instalação ou usando nosso recurso de [atualizações automatizadas](automated.md) podem ser vinculadas a diferentes canais de lançamento. Os seguintes canais estão disponíveis:

| Canal           | Descrição |
| --------------- | --------- |
| stable          | (Padrão) Stable é recomendado para ambientes de produção. Essas versões passaram por um período de estabilização na comunidade. |
| latest          | Latest é recomendado para testar os recursos mais recentes. Esses lançamentos ainda não passaram por um período de estabilização na comunidade. |
| v1.26 (exemplo) | Há um canal de lançamento vinculado a cada versão secundária do Kubernetes, incluindo versões que estão no fim da vida útil. Esses canais selecionarão o patch mais recente disponível, não necessariamente uma versão estável. |

Para uma lista exaustiva e atualizada de canais, você pode visitar a [API do serviço de canais do k3s](https://update.k3s.io/v1-release/channels). Para mais detalhes técnicos sobre como os canais funcionam, veja o [projeto channelserver](https://github.com/rancher/channelserver).
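
Por exemplo, um esboço de como consultar o serviço de canais diretamente. Assume-se que a listagem responda em JSON e que a resolução de um canal específico ocorra por redirecionamento HTTP, que é como o próprio script de instalação resolve versões:

```bash
# Lista todos os canais disponíveis (resposta em JSON):
curl -s https://update.k3s.io/v1-release/channels

# Resolve o canal "stable" para uma versão concreta,
# exibindo a URL de redirecionamento retornada pelo serviço:
curl -s -o /dev/null -w '%{redirect_url}\n' https://update.k3s.io/v1-release/channels/stable
```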
+
:::tip
Ao tentar atualizar para uma nova versão do K3s, a [política de desvio de versão (version skew) do Kubernetes](https://kubernetes.io/releases/version-skew-policy/) se aplica. Certifique-se de que seu plano não pule versões secundárias intermediárias ao atualizar. O próprio system-upgrade-controller não protegerá contra alterações não suportadas na versão do Kubernetes.
:::

### Atualizar K3s Usando o Script de Instalação

Para atualizar o K3s a partir de uma versão mais antiga, você pode executar novamente o script de instalação usando as mesmas opções de configuração usadas originalmente.

:::info Nota
A variável `INSTALL_K3S_EXEC`, as variáveis `K3S_` e os argumentos finais do shell são todos usados pelo script de instalação para gerar a unidade systemd e o arquivo de ambiente.
Se você definiu essas opções ao executar originalmente o script de instalação, mas não as definir novamente ao reexecutá-lo, os valores originais serão perdidos.

O conteúdo do [arquivo de configuração](../installation/configuration.md#configuration-file) não é gerenciado pelo script de instalação.
Se você quiser que sua configuração seja independente do script de instalação, use um arquivo de configuração em vez de passar variáveis de ambiente ou argumentos para o script de instalação.
:::

A execução do script de instalação irá:

1. Baixar o novo binário k3s
2. Atualizar a unidade systemd ou o script de init do OpenRC para refletir os argumentos passados para o script de instalação
3. Reiniciar o serviço k3s

Por exemplo, para atualizar para a versão estável atual:

```sh
curl -sfL https://get.k3s.io | sh -s -
```

Se você quiser atualizar para uma versão mais recente em um canal específico (como `latest`), você pode especificar o canal:
```sh
curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh -s -
```

Se você quiser atualizar para uma versão específica, você pode executar o seguinte comando:

```sh
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z+k3s1 sh -s -
```

:::tip
Se você quiser baixar a nova versão do k3s, mas não iniciá-la, você pode usar a variável de ambiente `INSTALL_K3S_SKIP_START=true`.
:::

### Atualizar K3s Usando o Binário

Para atualizar o K3s manualmente, você pode baixar a versão desejada do binário do K3s e substituir o binário existente pelo novo.

1. Baixe a versão desejada do binário do K3s em [releases](https://github.com/k3s-io/k3s/releases)
2. Copie o binário baixado para `/usr/local/bin/k3s` (ou o local desejado)
3. Pare o binário k3s antigo
4. Inicie o novo binário k3s \ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/upgrades.md b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/upgrades.md new file mode 100644 index 000000000..90e64e2a6 --- /dev/null +++ b/i18n/pt-BR/docusaurus-plugin-content-docs/current/upgrades/upgrades.md @@ -0,0 +1,21 @@ +--- +title: "Atualizações" +---

### Atualizando seu Cluster K3s

[Atualizações Manuais](manual.md) descreve várias técnicas para atualizar seu cluster manualmente. Ele também pode ser usado como base para atualizações por meio de ferramentas de infraestrutura como código (IaC) de terceiros, como o [Terraform](https://www.terraform.io/).

[Atualizações automatizadas](automated.md) descreve como executar atualizações automatizadas nativas do Kubernetes usando o [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) do Rancher.
+ +### Advertências Específicas da Versão + +- **Traefik:** Se o Traefik não estiver desabilitado, as versões 1.20 e anteriores do K3s instalarão o Traefik v1, enquanto as versões 1.21 e posteriores do K3s instalarão o Traefik v2, se o v1 ainda não estiver presente. Para atualizar do Traefik v1 mais antigo para o Traefik v2, consulte a [documentação do Traefik](https://doc.traefik.io/traefik/migration/v1-to-v2/) e use a [ferramenta de migração](https://github.com/traefik/traefik-migration-tool). + +- **Dados de bootstrap do K3s:** Se você estiver usando o K3s em uma configuração de HA com um armazenamento de dados SQL externo, e os nós do seu servidor (plano de controle) não foram iniciados com o sinalizador CLI `--token`, você não poderá mais adicionar servidores K3s adicionais ao cluster sem especificar o token. Certifique-se de manter uma cópia deste token, pois ele é necessário ao restaurar a partir do backup. Anteriormente, o K3s não impunha o uso de um token ao usar armazenamentos de dados SQL externos. + - As versões afetadas são <= v1.19.12+k3s1, v1.20.8+k3s1, v1.21.2+k3s1; as versões corrigidas são v1.19.13+k3s1, v1.20.9+k3s1, v1.21.3+k3s1. + +- Você pode recuperar o valor do token de qualquer servidor já associado ao cluster da seguinte maneira: +```bash +cat /var/lib/rancher/k3s/server/token +``` diff --git a/i18n/pt-BR/docusaurus-theme-classic/footer.json b/i18n/pt-BR/docusaurus-theme-classic/footer.json new file mode 100644 index 000000000..cda539712 --- /dev/null +++ b/i18n/pt-BR/docusaurus-theme-classic/footer.json @@ -0,0 +1,6 @@ +{ + "copyright": { + "message": "Copyright © 2025 Autores do Projeto K3s. Todos os direitos reservados.
A Linux Foundation possui marcas registradas\n e utiliza marcas comerciais. Para uma lista das marcas registradas da Linux Foundation,\n consulte nossa página de Uso de Marcas Comerciais.", + "description": "O rodapé de direitos autorais" + } +} \ No newline at end of file diff --git a/i18n/pt-BR/docusaurus-theme-classic/navbar.json b/i18n/pt-BR/docusaurus-theme-classic/navbar.json new file mode 100644 index 000000000..8eb45593a --- /dev/null +++ b/i18n/pt-BR/docusaurus-theme-classic/navbar.json @@ -0,0 +1,10 @@ +{ + "logo.alt": { + "message": "logo", + "description": "O texto alternativo do logotipo na barra de navegação" + }, + "item.label.GitHub": { + "message": "GitHub", + "description": "Item da barra de navegação com o rótulo GitHub" + } +} \ No newline at end of file