
Commit 0082265

Merge pull request #4675 from eggfoobar/add-arbiter-node-role-assets
OCPEDGE-1313: feat: added assets for arbiter node role
2 parents d9b4e74 + 2d4bca0 commit 0082265


47 files changed (+1598, -12 lines)
Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfigPool
+metadata:
+  name: arbiter
+  labels:
+    "operator.machineconfiguration.openshift.io/required-for-upgrade": ""
+    "machineconfiguration.openshift.io/mco-built-in": ""
+    "pools.operator.machineconfiguration.openshift.io/arbiter": ""
+spec:
+  machineConfigSelector:
+    matchLabels:
+      "machineconfiguration.openshift.io/role": "arbiter"
+  nodeSelector:
+    matchLabels:
+      node-role.kubernetes.io/arbiter: ""
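
The new pool selects rendered MachineConfigs through the "machineconfiguration.openshift.io/role": "arbiter" label and picks up nodes that carry the node-role.kubernetes.io/arbiter role label. As a rough illustration of the node-side match, here is a minimal Go sketch (not part of this commit) using the apimachinery label-selector helpers; the node hostname and its label are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Selector equivalent to the pool's spec.nodeSelector.matchLabels above.
	arbiterSelector := labels.SelectorFromSet(labels.Set{
		"node-role.kubernetes.io/arbiter": "",
	})

	// Hypothetical labels on an arbiter node.
	nodeLabels := labels.Set{
		"kubernetes.io/hostname":          "arbiter-0",
		"node-role.kubernetes.io/arbiter": "",
	}

	fmt.Println("node selected by arbiter pool:", arbiterSelector.Matches(nodeLabels))
}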

manifests/machineconfigcontroller/custom-machine-config-pool-selector-validatingadmissionpolicy.yaml

Lines changed: 2 additions & 0 deletions
@@ -22,6 +22,8 @@ spec:
       (object.spec.machineConfigSelector.matchLabels["machineconfiguration.openshift.io/role"] == "master")
       ||
       (object.spec.machineConfigSelector.matchLabels["machineconfiguration.openshift.io/role"] == "worker")
+      ||
+      (object.spec.machineConfigSelector.matchLabels["machineconfiguration.openshift.io/role"] == "arbiter")
     )
   )
   ||
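
The ValidatingAdmissionPolicy guarding custom MachineConfigPool selectors now also accepts "arbiter" as a built-in role value. A rough Go approximation of that check (not the actual CEL evaluation, and covering only the fragment shown above; the full expression has further branches):

package main

import "fmt"

// roleAllowedBySelectorCheck mirrors the fragment of the CEL expression above:
// the selector's "machineconfiguration.openshift.io/role" label must be
// master, worker, or (after this change) arbiter.
func roleAllowedBySelectorCheck(matchLabels map[string]string) bool {
	role := matchLabels["machineconfiguration.openshift.io/role"]
	return role == "master" || role == "worker" || role == "arbiter"
}

func main() {
	for _, role := range []string{"master", "worker", "arbiter", "infra"} {
		sel := map[string]string{"machineconfiguration.openshift.io/role": role}
		fmt.Printf("role %q passes this branch: %v\n", role, roleAllowedBySelectorCheck(sel))
	}
}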

pkg/controller/common/constants.go

Lines changed: 3 additions & 0 deletions
@@ -52,6 +52,9 @@ const (
 	// APIServerInstanceName is a singleton name for APIServer configuration
 	APIServerBootstrapFileLocation = "/etc/mcs/bootstrap/api-server/api-server.yaml"
 
+	// MachineConfigPoolArbiter is the MachineConfigPool name given to the arbiter
+	MachineConfigPoolArbiter = "arbiter"
+
 	// MachineConfigPoolMaster is the MachineConfigPool name given to the master
 	MachineConfigPoolMaster = "master"
 

pkg/controller/kubelet-config/kubelet_config_nodes.go

Lines changed: 2 additions & 1 deletion
@@ -133,7 +133,8 @@ func (ctrl *Controller) syncNodeConfigHandler(key string) error {
 		}
 	}
 	// The following code updates the MC with the relevant CGroups version
-	if role == ctrlcommon.MachineConfigPoolWorker || role == ctrlcommon.MachineConfigPoolMaster {
+	switch role {
+	case ctrlcommon.MachineConfigPoolWorker, ctrlcommon.MachineConfigPoolMaster, ctrlcommon.MachineConfigPoolArbiter:
 		err = updateMachineConfigwithCgroup(nodeConfig, mc)
 		if err != nil {
 			return err

pkg/controller/template/render.go

Lines changed: 20 additions & 3 deletions
@@ -47,6 +47,9 @@ const (
 	platformBase   = "_base"
 	platformOnPrem = "on-prem"
 	sno            = "sno"
+	masterRole     = "master"
+	workerRole     = "worker"
+	arbiterRole    = "arbiter"
 )
 
 // generateTemplateMachineConfigs returns MachineConfig objects from the templateDir and a config object
@@ -80,6 +83,11 @@ func generateTemplateMachineConfigs(config *RenderConfig, templateDir string) ([
 			continue
 		}
 
+		// Avoid creating resources for non arbiter deployments
+		if role == arbiterRole && !hasControlPlaneTopology(config, configv1.HighlyAvailableArbiterMode) {
+			continue
+		}
+
 		roleConfigs, err := GenerateMachineConfigsForRole(config, role, templateDir)
 		if err != nil {
 			return nil, fmt.Errorf("failed to create MachineConfig for role %s: %w", role, err)
@@ -102,10 +110,10 @@ func generateTemplateMachineConfigs(config *RenderConfig, templateDir string) ([
 func GenerateMachineConfigsForRole(config *RenderConfig, role, templateDir string) ([]*mcfgv1.MachineConfig, error) {
 	rolePath := role
 	//nolint:goconst
-	if role != "worker" && role != "master" {
+	if role != workerRole && role != masterRole && role != arbiterRole {
 		// custom pools are only allowed to be worker's children
 		// and can reuse the worker templates
-		rolePath = "worker"
+		rolePath = workerRole
 	}
 
 	path := filepath.Join(templateDir, rolePath)
@@ -219,7 +227,7 @@ func getPaths(config *RenderConfig, platformString string) []string {
 	platformBasedPaths = append(platformBasedPaths, platformString)
 
 	// sno is specific case and it should override even specific platform files
-	if config.Infra.Status.ControlPlaneTopology == configv1.SingleReplicaTopologyMode {
+	if hasControlPlaneTopology(config, configv1.SingleReplicaTopologyMode) {
 		platformBasedPaths = append(platformBasedPaths, sno)
 	}
 
@@ -799,3 +807,12 @@ func cloudPlatformLoadBalancerIPState(cfg RenderConfig) LoadBalancerIPState {
 	}
 	return lbIPState
 }
+
+// hasControlPlaneTopology returns true if the topology matches the infra.controlPlaneTopology
+// checks to make sure RenderConfig and Infra are not nil.
+func hasControlPlaneTopology(r *RenderConfig, topo configv1.TopologyMode) bool {
+	if r == nil || r.Infra == nil {
+		return false
+	}
+	return r.Infra.Status.ControlPlaneTopology == topo
+}
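
With this change the template controller only renders the new arbiter role when the cluster's Infrastructure reports the HighlyAvailableArbiter control-plane topology, checked through the nil-safe hasControlPlaneTopology helper. A simplified, self-contained Go sketch of that gating (the types below are local stand-ins, not the operator's RenderConfig/Infrastructure types):

package main

import "fmt"

type infraStatus struct{ ControlPlaneTopology string }
type infrastructure struct{ Status infraStatus }
type renderConfig struct{ Infra *infrastructure }

// hasControlPlaneTopology mirrors the nil-safe helper added in render.go.
func hasControlPlaneTopology(r *renderConfig, topo string) bool {
	if r == nil || r.Infra == nil {
		return false
	}
	return r.Infra.Status.ControlPlaneTopology == topo
}

func main() {
	cfg := &renderConfig{Infra: &infrastructure{Status: infraStatus{ControlPlaneTopology: "HighlyAvailable"}}}
	for _, role := range []string{"master", "worker", "arbiter"} {
		// Mirrors the skip in generateTemplateMachineConfigs: arbiter templates
		// are rendered only on a HighlyAvailableArbiter control plane.
		if role == "arbiter" && !hasControlPlaneTopology(cfg, "HighlyAvailableArbiter") {
			fmt.Println("skipping role:", role)
			continue
		}
		fmt.Println("rendering role:", role)
	}
}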

pkg/controller/template/render_test.go

Lines changed: 15 additions & 2 deletions
@@ -203,6 +203,7 @@ var (
 	configs = map[string]string{
 		"aws":       "./test_data/controller_config_aws.yaml",
 		"baremetal": "./test_data/controller_config_baremetal.yaml",
+		"baremetal-arbiter": "./test_data/controller_config_baremetal_arbiter.yaml",
 		"gcp":       "./test_data/controller_config_gcp.yaml",
 		"openstack": "./test_data/controller_config_openstack.yaml",
 		"libvirt":   "./test_data/controller_config_libvirt.yaml",
@@ -281,7 +282,7 @@ func TestGenerateMachineConfigs(t *testing.T) {
 			if err != nil {
 				t.Errorf("Failed to parse Ignition config for %s, %s, error: %v", config, cfg.Name, err)
 			}
-			if role == "master" {
+			if role == masterRole {
 				if !foundPullSecretMaster {
 					foundPullSecretMaster = findIgnFile(ign.Storage.Files, "/var/lib/kubelet/config.json", t)
 				}
@@ -292,7 +293,7 @@ func TestGenerateMachineConfigs(t *testing.T) {
 					foundMTUMigrationMaster = findIgnFile(ign.Storage.Files, "/usr/local/bin/mtu-migration.sh", t)
 					foundMTUMigrationMaster = foundMTUMigrationMaster || findIgnFile(ign.Storage.Files, "/etc/systemd/system/mtu-migration.service", t)
 				}
-			} else if role == "worker" {
+			} else if role == workerRole {
 				if !foundPullSecretWorker {
 					foundPullSecretWorker = findIgnFile(ign.Storage.Files, "/var/lib/kubelet/config.json", t)
 				}
@@ -303,6 +304,18 @@ func TestGenerateMachineConfigs(t *testing.T) {
 					foundMTUMigrationWorker = findIgnFile(ign.Storage.Files, "/usr/local/bin/mtu-migration.sh", t)
 					foundMTUMigrationWorker = foundMTUMigrationWorker || findIgnFile(ign.Storage.Files, "/etc/systemd/system/mtu-migration.service", t)
 				}
+			} else if role == arbiterRole {
+				// arbiter role currently follows master output
+				if !foundPullSecretMaster {
+					foundPullSecretMaster = findIgnFile(ign.Storage.Files, "/var/lib/kubelet/config.json", t)
+				}
+				if !foundKubeletUnitMaster {
+					foundKubeletUnitMaster = findIgnUnit(ign.Systemd.Units, "kubelet.service", t)
+				}
+				if !foundMTUMigrationMaster {
+					foundMTUMigrationMaster = findIgnFile(ign.Storage.Files, "/usr/local/bin/mtu-migration.sh", t)
+					foundMTUMigrationMaster = foundMTUMigrationMaster || findIgnFile(ign.Storage.Files, "/etc/systemd/system/mtu-migration.service", t)
+				}
 			} else {
 				t.Fatalf("Unknown role %s", role)
 			}
Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
+apiVersion: "machineconfigurations.openshift.io/v1"
+kind: "ControllerConfig"
+spec:
+  clusterDNSIP: "10.3.0.10"
+  cloudProviderConfig: ""
+  etcdInitialCount: 3
+  etcdCAData: ZHVtbXkgZXRjZC1jYQo=
+  rootCAData: ZHVtbXkgcm9vdC1jYQo=
+  pullSecret:
+    data: ZHVtbXkgZXRjZC1jYQo=
+  images:
+    etcd: image/etcd:1
+    setupEtcdEnv: image/setupEtcdEnv:1
+    infraImage: image/infraImage:1
+    kubeClientAgentImage: image/kubeClientAgentImage:1
+  infra:
+    apiVersion: config.openshift.io/v1
+    kind: Infrastructure
+    spec:
+      cloudConfig:
+        key: config
+        name: cloud-provider-config
+    status:
+      apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443
+      apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443
+      etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems
+      infrastructureName: my-test-cluster
+      controlPlaneTopology: HighlyAvailableArbiter
+      platformStatus:
+        type: "BareMetal"
+        baremetal:
+          apiServerInternalIP: 10.0.0.1
+          ingressIP: 10.0.0.2
+          nodeDNSIP: 10.0.0.3
+  dns:
+    spec:
+      baseDomain: my-test-cluster.installer.team.coreos.systems

pkg/daemon/daemon.go

Lines changed: 2 additions & 0 deletions
@@ -2728,6 +2728,8 @@ func (dn *Daemon) getControlPlaneTopology() configv1.TopologyMode {
 		return configv1.SingleReplicaTopologyMode
 	case configv1.HighlyAvailableTopologyMode:
 		return configv1.HighlyAvailableTopologyMode
+	case configv1.HighlyAvailableArbiterMode:
+		return configv1.HighlyAvailableArbiterMode
 	default:
 		// for any unhandled case, default to HighlyAvailableTopologyMode
 		return configv1.HighlyAvailableTopologyMode

pkg/operator/bootstrap.go

Lines changed: 8 additions & 1 deletion
@@ -155,7 +155,7 @@ func RenderBootstrap(
 		templatectrl.KubeRbacProxyKey: imgs.KubeRbacProxy,
 	}
 
-	config := getRenderConfig("", string(filesData[kubeAPIServerServingCA]), spec, &imgs.RenderConfigImages, infra.Status.APIServerInternalURL, nil, []*mcfgv1alpha1.MachineOSConfig{}, nil)
+	config := getRenderConfig("", string(filesData[kubeAPIServerServingCA]), spec, &imgs.RenderConfigImages, infra, nil, []*mcfgv1alpha1.MachineOSConfig{}, nil)
 
 	manifests := []manifest{
 		{
@@ -182,6 +182,13 @@ func RenderBootstrap(
 		},
 	}
 
+	if infra.Status.ControlPlaneTopology == configv1.HighlyAvailableArbiterMode {
+		manifests = append(manifests, manifest{
+			name:     "manifests/arbiter.machineconfigpool.yaml",
+			filename: "bootstrap/manifests/arbiter.machineconfigpool.yaml",
+		})
+	}
+
 	manifests = appendManifestsByPlatform(manifests, *infra)
 
 	for _, m := range manifests {

pkg/operator/sync.go

Lines changed: 12 additions & 4 deletions
@@ -641,9 +641,9 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig, _ *configv1.ClusterOpera
 		}
 
 		// create renderConfig
-		optr.renderConfig = getRenderConfig(optr.namespace, string(kubeAPIServerServingCABytes), spec, &imgs.RenderConfigImages, infra.Status.APIServerInternalURL, pointerConfigData, moscs, apiServer)
+		optr.renderConfig = getRenderConfig(optr.namespace, string(kubeAPIServerServingCABytes), spec, &imgs.RenderConfigImages, infra, pointerConfigData, moscs, apiServer)
 	} else {
-		optr.renderConfig = getRenderConfig(optr.namespace, string(kubeAPIServerServingCABytes), spec, &imgs.RenderConfigImages, infra.Status.APIServerInternalURL, pointerConfigData, nil, apiServer)
+		optr.renderConfig = getRenderConfig(optr.namespace, string(kubeAPIServerServingCABytes), spec, &imgs.RenderConfigImages, infra, pointerConfigData, nil, apiServer)
 	}
 
 	return nil
@@ -682,6 +682,11 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig, _ *configv1.C
 		"manifests/master.machineconfigpool.yaml",
 		"manifests/worker.machineconfigpool.yaml",
 	}
+
+	if config.Infra.Status.ControlPlaneTopology == configv1.HighlyAvailableArbiterMode {
+		mcps = append(mcps, "manifests/arbiter.machineconfigpool.yaml")
+	}
+
 	for _, mcp := range mcps {
 		mcpBytes, err := renderAsset(config, mcp)
 		if err != nil {
@@ -778,6 +783,8 @@ func (optr *Operator) syncMachineConfigNodes(_ *renderConfig, _ *configv1.Cluste
 			pool = "worker"
 		} else if _, ok = node.Labels["node-role.kubernetes.io/master"]; ok {
 			pool = "master"
+		} else if _, ok = node.Labels["node-role.kubernetes.io/arbiter"]; ok {
+			pool = "arbiter"
 		}
 		newMCS := &v1alpha1.MachineConfigNode{
 			Spec: v1alpha1.MachineConfigNodeSpec{
@@ -2035,7 +2042,7 @@ func setGVK(obj runtime.Object, scheme *runtime.Scheme) error {
 	return nil
 }
 
-func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.ControllerConfigSpec, imgs *ctrlcommon.RenderConfigImages, apiServerURL string, pointerConfigData []byte, moscs []*mcfgv1alpha1.MachineOSConfig, apiServer *configv1.APIServer) *renderConfig {
+func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.ControllerConfigSpec, imgs *ctrlcommon.RenderConfigImages, infra *configv1.Infrastructure, pointerConfigData []byte, moscs []*mcfgv1alpha1.MachineOSConfig, apiServer *configv1.APIServer) *renderConfig {
 	tlsMinVersion, tlsCipherSuites := ctrlcommon.GetSecurityProfileCiphersFromAPIServer(apiServer)
 	return &renderConfig{
 		TargetNamespace: tnamespace,
@@ -2044,8 +2051,9 @@ func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.C
 		ControllerConfig: *ccSpec,
 		Images: imgs,
 		KubeAPIServerServingCA: kubeAPIServerServingCA,
-		APIServerURL: apiServerURL,
+		APIServerURL: infra.Status.APIServerInternalURL,
 		PointerConfig: string(pointerConfigData),
+		Infra: *infra,
 		MachineOSConfigs: moscs,
 		TLSMinVersion: tlsMinVersion,
 		TLSCipherSuites: tlsCipherSuites,
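
Besides wiring the whole Infrastructure object into the render config, sync.go now resolves a node's MachineConfigNode pool from the new arbiter role label as well. A small stand-alone Go sketch (not the operator's code) of that label-to-pool mapping, in the same order as the diff above; a node with none of these labels is simply left unassigned in this sketch:

package main

import "fmt"

// poolForNodeLabels checks the node role labels in the order used by
// syncMachineConfigNodes: worker, then master, then arbiter.
func poolForNodeLabels(nodeLabels map[string]string) string {
	if _, ok := nodeLabels["node-role.kubernetes.io/worker"]; ok {
		return "worker"
	}
	if _, ok := nodeLabels["node-role.kubernetes.io/master"]; ok {
		return "master"
	}
	if _, ok := nodeLabels["node-role.kubernetes.io/arbiter"]; ok {
		return "arbiter"
	}
	return ""
}

func main() {
	fmt.Println(poolForNodeLabels(map[string]string{"node-role.kubernetes.io/arbiter": ""})) // arbiter
	fmt.Println(poolForNodeLabels(map[string]string{"node-role.kubernetes.io/worker": ""}))  // worker
}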

pkg/server/bootstrap_server.go

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ func NewBootstrapServer(dir, kubeconfig string, ircerts []string) (Server, error
 const yamlExt = ".yaml"
 
 func (bsc *bootstrapServer) GetConfig(cr poolRequest) (*runtime.RawExtension, error) {
-	if cr.machineConfigPool != "master" {
+	if cr.machineConfigPool != "master" && cr.machineConfigPool != "arbiter" {
 		return nil, fmt.Errorf("refusing to serve bootstrap configuration to pool %q", cr.machineConfigPool)
 	}
 	// 1. Read the Machine Config Pool object.
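
The bootstrap Machine Config Server previously refused to serve Ignition to any pool other than master; with this change the arbiter pool is accepted too. A stand-alone Go sketch (not the server's code) of the relaxed guard:

package main

import "fmt"

// servableDuringBootstrap mirrors the check in GetConfig: only the master and
// arbiter pools receive bootstrap configuration.
func servableDuringBootstrap(pool string) bool {
	return pool == "master" || pool == "arbiter"
}

func main() {
	for _, p := range []string{"master", "arbiter", "worker"} {
		fmt.Printf("pool %q served at bootstrap: %v\n", p, servableDuringBootstrap(p))
	}
}
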
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+mode: 0644
+path: "/etc/systemd/system.conf.d/kubelet-cgroups.conf"
+contents:
+  inline: |
+    # Turning on Accounting helps track down performance issues.
+    [Manager]
+    DefaultCPUAccounting=yes
+    DefaultMemoryAccounting=yes
+    DefaultBlockIOAccounting=yes
Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+mode: 0755
+path: "/usr/local/bin/recover-kubeconfig.sh"
+contents:
+  inline: |
+    #!/bin/bash
+
+    set -eou pipefail
+
+    # context
+    intapi=$(oc get infrastructures.config.openshift.io cluster -o "jsonpath={.status.apiServerInternalURI}")
+    context="$(oc config current-context)"
+    # cluster
+    cluster="$(oc config view -o "jsonpath={.contexts[?(@.name==\"$context\")].context.cluster}")"
+    server="$(oc config view -o "jsonpath={.clusters[?(@.name==\"$cluster\")].cluster.server}")"
+    # token
+    ca_crt_data="$(oc get secret -n openshift-machine-config-operator node-bootstrapper-token -o "jsonpath={.data.ca\.crt}" | base64 --decode)"
+    namespace="$(oc get secret -n openshift-machine-config-operator node-bootstrapper-token -o "jsonpath={.data.namespace}" | base64 --decode)"
+    token="$(oc get secret -n openshift-machine-config-operator node-bootstrapper-token -o "jsonpath={.data.token}" | base64 --decode)"
+
+    export KUBECONFIG="$(mktemp)"
+    kubectl config set-credentials "kubelet" --token="$token" >/dev/null
+    ca_crt="$(mktemp)"; echo "$ca_crt_data" > $ca_crt
+    kubectl config set-cluster $cluster --server="$intapi" --certificate-authority="$ca_crt" --embed-certs >/dev/null
+    kubectl config set-context kubelet --cluster="$cluster" --user="kubelet" >/dev/null
+    kubectl config use-context kubelet >/dev/null
+    cat "$KUBECONFIG"
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+name: rpm-ostreed.service
+dropins:
+  - name: mco-controlplane-nice.conf
+    contents: |
+      # See https://github.com/openshift/machine-config-operator/issues/1897
+      [Service]
+      Nice=10
+      IOSchedulingClass=best-effort
+      IOSchedulingPriority=6
Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/apiserver-watcher.yaml"
+contents:
+  inline: |
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: apiserver-watcher
+      namespace: openshift-kube-apiserver
+    spec:
+      containers:
+      - name: apiserver-watcher
+        image: "{{.Images.apiServerWatcherKey}}"
+        command:
+        - flock
+        - --verbose
+        - --exclusive
+        - --timeout=300
+        - /rootfs/run/cloud-routes/apiserver-watcher.lock
+        - apiserver-watcher
+        args:
+        - "run"
+        - "--health-check-url={{.Infra.Status.APIServerInternalURL}}/readyz"
+        resources:
+          requests:
+            cpu: 20m
+            memory: 50Mi
+        terminationMessagePolicy: FallbackToLogsOnError
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /rootfs
+          name: rootfs
+          mountPropagation: HostToContainer
+      hostNetwork: true
+      hostPID: true
+      priorityClassName: system-node-critical
+      tolerations:
+      - operator: "Exists"
+      restartPolicy: Always
+      volumes:
+      - name: rootfs
+        hostPath:
+          path: /
+
