diff --git a/.golangci.yml b/.golangci.yml index e74eab63c..c2f56e534 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,3 +1,8 @@ +#issues: +# exclude-dirs: +# - (^|/)vendor($|/) +# - pkg/utils/lifted +# - pkg/scheduler/lifted run: timeout: 10m modules-download-mode: vendor diff --git a/Makefile b/Makefile index 046f2434b..d6c2955ae 100644 --- a/Makefile +++ b/Makefile @@ -15,8 +15,6 @@ MACOS_TARGETS := clusterlink-controller-manager \ clusterlink-network-manager \ clusterlink-proxy \ clustertree-cluster-manager \ - virtual-cluster-operator \ - node-agent \ scheduler \ # clusterlink-agent and clusterlink-floater only support linux platform @@ -28,8 +26,6 @@ TARGETS := clusterlink-controller-manager \ clusterlink-network-manager \ clusterlink-proxy \ clustertree-cluster-manager \ - virtual-cluster-operator \ - node-agent \ scheduler \ # If GOOS is macOS, assign the value of MACOS_TARGETS to TARGETS diff --git a/cmd/kubenest/node-agent/OWNERS b/cmd/kubenest/node-agent/OWNERS deleted file mode 100644 index f51f906e0..000000000 --- a/cmd/kubenest/node-agent/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -approvers: - - OrangeBao - - duanmengkk - - yuleichun-striving - - village-way -reviewers: - - OrangeBao - - duanmengkk - - yuleichun-striving - - village-way \ No newline at end of file diff --git a/cmd/kubenest/node-agent/app/client/client.go b/cmd/kubenest/node-agent/app/client/client.go deleted file mode 100644 index 932832f19..000000000 --- a/cmd/kubenest/node-agent/app/client/client.go +++ /dev/null @@ -1,414 +0,0 @@ -package client - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/base64" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "os/signal" - "path/filepath" - "strings" - "sync" - - "github.com/gorilla/websocket" - "github.com/spf13/cobra" - "golang.org/x/term" - - "github.com/kosmos.io/kosmos/cmd/kubenest/node-agent/app/logger" -) - -var ( - log = logger.GetLogger() - ClientCmd = &cobra.Command{ - Use: "client", - Short: "A WebSocket client CLI tool to execute commands and upload files", - Long: "supports executing remote commands, uploading files, and opening a pty", - RunE: func(cmd *cobra.Command, args []string) error { - return cmd.Help() - }, - } - shCmd = &cobra.Command{ - Use: "sh [command]", - Short: "Execute a command via WebSocket", - Long: "Execute a command on the remote server", - RunE: cmdCmdRun, - Example: `node-agent client sh -u=[user] -p=[pass] -a="127.0.0.1:5678" -o ls -r "-l"`, - } - uploadCmd = &cobra.Command{ - Use: "upload", - Short: "Upload a file via WebSocket", - Long: "upload a file to remote servers", - RunE: cmdUploadRun, - Example: `node-agent upload -u=[user] -p=[pass] -a="127.0.0.1:5678" -f /tmp -n=app.go`, - } - ttyCmd = &cobra.Command{ - Use: "tty", - Short: "Execute a command via WebSocket with TTY", - Long: "execute a command on the remote server using a pty", - RunE: cmdTtyRun, - } - checkCmd = &cobra.Command{ - Use: "check", - Short: "Check whether the port is open", - Long: "Check whether the port can be assigned", - RunE: cmdCheckRun, - } - wg sync.WaitGroup - - WsAddr []string // websocket client connect address list - filePath string // the server path to save upload file - fileName string // local file to upload - params []string // New slice to hold multiple command parameters - operation string // operation for client to execute -) - -func cmdCheckRun(cmd *cobra.Command, _ []string) error { - if len(params) != 1 { - log.Errorf("port list is required and port list size must not be greater than 1") - return fmt.Errorf("port list is required and port list size must not 
be greater than 1") - } - auth, err := getAuth(cmd) - if err != nil { - return err - } - headers := http.Header{ - "Authorization": {"Basic " + auth}, - } - for _, addr := range WsAddr { - wg.Add(1) - go func(addr string) { - defer wg.Done() - wsURL := fmt.Sprintf("wss://%s/check/?port=%s", addr, params[0]) - fmt.Println("Checking port:", wsURL) - err := connectAndHandleMessages(wsURL, headers) - if err != nil { - log.Errorf("failed to check port: %v on %s: %v\n", err, addr, strings.Join(params, "&")) - } - }(addr) - } - wg.Wait() - return nil -} - -var uniqueValuesMap = make(map[string]bool) -var dialer = websocket.DefaultDialer - -func BasicAuth(user, password string) string { - auth := user + ":" + password - return base64.StdEncoding.EncodeToString([]byte(auth)) -} -func init() { - // #nosec G402 - dialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - - ClientCmd.PersistentFlags().StringSliceVarP(&WsAddr, "addr", "a", []string{}, "WebSocket address (e.g., host1:port1,host2:port2)") - - // PreRunE check param - ClientCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { - for _, value := range WsAddr { - if _, exists := uniqueValuesMap[value]; exists { - return errors.New("duplicate values are not allowed") - } - uniqueValuesMap[value] = true - } - return nil - } - - shCmd.Flags().StringArrayVarP(¶ms, "param", "r", []string{}, "Command parameters") - shCmd.Flags().StringVarP(&operation, "operation", "o", "", "Operation to perform") - _ = shCmd.MarkFlagRequired("addr") - - checkCmd.Flags().StringArrayVarP(¶ms, "param", "r", []string{}, "Command parameters") - _ = checkCmd.MarkFlagRequired("addr") - - uploadCmd.Flags().StringVarP(&fileName, "name", "n", "", "Name of the file to upload") - uploadCmd.Flags().StringVarP(&filePath, "path", "f", "", "Path to the file to upload") - // avoid can't show subcommand help and execute subcommand - _ = uploadCmd.MarkFlagRequired("name") - _ = uploadCmd.MarkFlagRequired("path") - - ttyCmd.Flags().StringVarP(&operation, "operation", "o", "", "Operation to perform") - err := ttyCmd.MarkFlagRequired("operation") // Ensure 'operation' flag is required for ttyCmd - if err != nil { - return - } - ClientCmd.AddCommand(shCmd) - ClientCmd.AddCommand(uploadCmd) - ClientCmd.AddCommand(ttyCmd) - ClientCmd.AddCommand(checkCmd) -} - -func cmdTtyRun(cmd *cobra.Command, _ []string) error { - auth, err := getAuth(cmd) - if err != nil { - return err - } - headers := http.Header{ - "Authorization": {"Basic " + auth}, - } - cmdStr := fmt.Sprintf("command=%s", operation) - // execute one every wsAddr - for _, addr := range WsAddr { - wsURL := fmt.Sprintf("wss://%s/tty/?%s", addr, cmdStr) - fmt.Println("Executing tty:", cmdStr, "on", addr) - err := connectTty(wsURL, headers) - if err != nil { - log.Errorf("failed to execute command: %v on %s: %v\n", err, addr, cmdStr) - } - } - return nil -} - -func connectTty(wsURL string, headers http.Header) error { - ws, resp, err := dialer.Dial(wsURL, headers) - defer wsRespClose(resp) - if err != nil { - return fmt.Errorf("WebSocket dial error: %v", err) - } - defer ws.Close() - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - // set raw for control char - oldState, err := term.MakeRaw(int(os.Stdin.Fd())) - if err != nil { - return fmt.Errorf("failed to set raw terminal: %v", err) - } - defer func(fd int, oldState *term.State) { - err := term.Restore(fd, oldState) - if err != nil { - log.Errorf("failed to restore terminal: %v", err) - } - }(int(os.Stdin.Fd()), oldState) - - 
inputChan := make(chan []byte) - go func() { - buf := make([]byte, 1024) - for { - n, err := os.Stdin.Read(buf) - if err != nil { - log.Println("Read input error:", err) - return - } - inputChan <- buf[0:n] - } - }() - done := make(chan struct{}) - // Read messages from the WebSocket server - go func() { - defer close(done) - for { - _, message, err := ws.ReadMessage() - if err != nil { - log.Infof("ReadMessage: %v", err) - interrupt <- os.Interrupt - return - } - fmt.Printf("%s", message) - } - }() - // Main event loop - go func() { - <-interrupt - // Cleanly close the connection on interrupt - log.Infof("Interrupt received, closing connection...") - if err := ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil { - log.Infof("CloseMessage: %v", err) - return - } - }() - - for { - select { - case msg, ok := <-inputChan: - if !ok { - return nil - } - // Send user input to the WebSocket server - if err := ws.WriteMessage(websocket.BinaryMessage, msg); err != nil { - log.Infof("WriteMessage: %v", err) - return err - } - if bytes.Equal(msg, []byte("exit")) { - return nil - } - case <-done: - return nil - } - } -} - -func cmdCmdRun(cmd *cobra.Command, _ []string) error { - if len(operation) == 0 { - log.Errorf("operation is required") - return fmt.Errorf("operation is required") - } - auth, err := getAuth(cmd) - if err != nil { - return err - } - // use set to remove duplicate for wsAddr - return executeWebSocketCommand(auth) -} - -func cmdUploadRun(cmd *cobra.Command, _ []string) error { - auth, err := getAuth(cmd) - if err != nil { - return err - } - return uploadFile(filePath, fileName, auth) -} - -func getAuth(cmd *cobra.Command) (string, error) { - user, _ := cmd.Flags().GetString("user") - password, _ := cmd.Flags().GetString("password") - if len(user) == 0 || len(password) == 0 { - log.Errorf("user and password are required") - return "", fmt.Errorf("user and password are required") - } - auth := BasicAuth(user, password) - return auth, nil -} - -func executeWebSocketCommand(auth string) error { - headers := http.Header{ - "Authorization": {"Basic " + auth}, - } - cmdStr := fmt.Sprintf("command=%s", operation) - // Build params part of the URL - if len(params) > 1 { - paramsStr := "args=" - for _, param := range params { - paramsStr += url.QueryEscape(param) + "&&args=" - } - paramsStr = paramsStr[:len(paramsStr)-7] - cmdStr = fmt.Sprintf("command=%s&&%s", operation, paramsStr) - } - - // execute one every wsAddr - for _, addr := range WsAddr { - wg.Add(1) - go func(addr string) { - defer wg.Done() - wsURL := fmt.Sprintf("wss://%s/cmd/?%s", addr, cmdStr) - fmt.Println("Executing command:", cmdStr, "on", addr) - err := connectAndHandleMessages(wsURL, headers) - if err != nil { - log.Errorf("failed to execute command: %v on %s: %v\n", err, addr, cmdStr) - } - }(addr) - } - wg.Wait() - return nil -} - -func uploadFile(filePath, fileName, auth string) error { - headers := http.Header{ - "Authorization": {"Basic " + auth}, - } - for _, addr := range WsAddr { - wg.Add(1) - go func(addr string) { - defer wg.Done() - wsURL := fmt.Sprintf("wss://%s/upload/?file_name=%s&file_path=%s", addr, url.QueryEscape(filepath.Base(fileName)), url.QueryEscape(filePath)) - fmt.Println("Uploading file:", fileName, "from", filePath, "to", addr) - err := connectAndSendFile(wsURL, headers, fileName) - if err != nil { - log.Errorf("failed to upload file: %v on %s: %v\n", err, addr, fileName) - } - }(addr) - } - wg.Wait() - return nil -} - -func 
wsRespClose(resp *http.Response) { - if resp != nil && resp.Body != nil { - _ = resp.Body.Close() - } -} - -func connectAndHandleMessages(wsURL string, headers http.Header) error { - ws, resp, err := dialer.Dial(wsURL, headers) - defer wsRespClose(resp) - if err != nil { - return fmt.Errorf("WebSocket dial error: %v", err) - } - defer ws.Close() - - handleMessages(ws) - return nil -} - -func connectAndSendFile(wsURL string, headers http.Header, fileName string) error { - ws, resp, err := dialer.Dial(wsURL, headers) - if err != nil { - return fmt.Errorf("WebSocket dial error: %v", err) - } - defer wsRespClose(resp) - defer ws.Close() - - sendFile(ws, fileName) - - handleMessages(ws) - return nil -} - -func handleMessages(ws *websocket.Conn) { - defer ws.Close() - for { - _, message, err := ws.ReadMessage() - if err != nil { - log.Println("Read message error:", err) - return - } - fmt.Printf("Received message: %s\n", message) - } -} - -func sendFile(ws *websocket.Conn, filePath string) { - // if the file does not exist, close the connection - if _, err := os.Stat(filePath); os.IsNotExist(err) { - log.Errorf("File does not exist: %v", err) - err := ws.WriteMessage(websocket.BinaryMessage, []byte("EOF")) - if err != nil { - log.Printf("Write message error: %v", err) - } - return - } - - file, err := os.Open(filePath) - if err != nil { - log.Errorf("File open error: %v", err) - } - defer file.Close() - // size of the chunk read on each iteration - bufferSize := 1024 // e.g. read 1024 bytes at a time - buffer := make([]byte, bufferSize) - - reader := bufio.NewReader(file) - for { - n, err := reader.Read(buffer) - if err != nil { - // check if EOF - if err.Error() == "EOF" { - break - } - log.Errorf("failed to read file: %v", err) - return - } - dataToSend := buffer[:n] - - _ = ws.WriteMessage(websocket.BinaryMessage, dataToSend) - } - - err = ws.WriteMessage(websocket.BinaryMessage, []byte("EOF")) - log.Infof("send EOF ----") - if err != nil { - log.Errorf("Write message error: %v", err) - } -} diff --git a/cmd/kubenest/node-agent/app/client/client_test.go b/cmd/kubenest/node-agent/app/client/client_test.go deleted file mode 100644 index ff83f7dd0..000000000 --- a/cmd/kubenest/node-agent/app/client/client_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package client - -import ( - "crypto/tls" - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/kosmos.io/kosmos/cmd/kubenest/node-agent/app/serve" -) - -// test addr user pass -var testAddr, username, pass string -var headers http.Header - -var currentDir, _ = os.Getwd() -var parentDir string - -func init() { - // #nosec G402 - dialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - _, filename, _, _ := runtime.Caller(0) - currentDir = filepath.Dir(filename) - parentDir = filepath.Dir(currentDir) - _ = os.Setenv("WEB_USER", "") - _ = os.Setenv("WEB_PASS", "") - username = os.Getenv("WEB_USER") - pass = os.Getenv("WEB_PASS") - testAddr = "127.0.0.1:5678" - - headers = http.Header{ - "Authorization": {"Basic " + BasicAuth(username, pass)}, - } - go func() { - err := serve.Start(":5678", "cert.pem", "key.pem", username, pass) - if err != nil { - log.Fatal(err) - } - }() - time.Sleep(10 * time.Second) -} - -func TestCmd(_ *testing.T) { - fmt.Println("Command test") - command := url.QueryEscape("ls -l") - ws, resp, err := dialer.Dial("wss://"+testAddr+"/cmd/?command="+command, headers) - defer wsRespClose(resp) - if err != nil { - log.Printf("Dial error: %v (HTTP response: %v)", err, resp) - return - } - defer ws.Close() - - handleMessages(ws) -} - -func 
TestUpload(_ *testing.T) { - fmt.Println("Upload file test") - fileName := url.QueryEscape("app.go") - filePath := url.QueryEscape("/tmp/websocket") - - ws, resp, err := dialer.Dial("wss://"+testAddr+"/upload/?file_name="+fileName+"&file_path="+filePath, headers) - if err != nil { - log.Printf("Dial error: %v (HTTP response: %v)", err, resp) - return - } - defer wsRespClose(resp) - defer ws.Close() - - sendFile(ws, filepath.Join(currentDir, "app.go")) - handleMessages(ws) -} - -func TestShellScript(_ *testing.T) { - fmt.Println("Shell script test") - - ws, resp, err := dialer.Dial("wss://"+testAddr+"/sh/?args=10&&args=10", headers) - if err != nil { - log.Printf("Dial error: %v (HTTP response: %v)", err, resp) - return - } - defer wsRespClose(resp) - defer ws.Close() - - sendFile(ws, filepath.Join(parentDir, "count.sh")) - handleMessages(ws) -} - -func TestPyScript(_ *testing.T) { - fmt.Println("Python script test") - ws, resp, err := dialer.Dial("wss://"+testAddr+"/py/?args=10&&args=10", headers) - if err != nil { - log.Printf("Dial error: %v (HTTP response: %v)", err, resp) - return - } - defer wsRespClose(resp) - defer ws.Close() - sendFile(ws, filepath.Join(parentDir, "count.py")) - handleMessages(ws) -} diff --git a/cmd/kubenest/node-agent/app/logger/logger.go b/cmd/kubenest/node-agent/app/logger/logger.go deleted file mode 100644 index cd9f36443..000000000 --- a/cmd/kubenest/node-agent/app/logger/logger.go +++ /dev/null @@ -1,27 +0,0 @@ -package logger - -import ( - "io" - "os" - - "github.com/sirupsen/logrus" -) - -var log *logrus.Logger - -func init() { - log = logrus.New() - // setup log - log.Out = os.Stdout - logFile, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - if err == nil { - log.SetOutput(io.MultiWriter(os.Stdout, logFile)) - } else { - log.Info("Failed to log to file, using default stderr") - } - log.SetLevel(logrus.InfoLevel) -} - -func GetLogger() *logrus.Logger { - return log -} diff --git a/cmd/kubenest/node-agent/app/root.go b/cmd/kubenest/node-agent/app/root.go deleted file mode 100644 index e5bdef67c..000000000 --- a/cmd/kubenest/node-agent/app/root.go +++ /dev/null @@ -1,95 +0,0 @@ -package app - -import ( - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - "github.com/kosmos.io/kosmos/cmd/kubenest/node-agent/app/client" - "github.com/kosmos.io/kosmos/cmd/kubenest/node-agent/app/logger" - "github.com/kosmos.io/kosmos/cmd/kubenest/node-agent/app/serve" -) - -var ( - user string // username for authentication - password string // password for authentication - log = logger.GetLogger() -) - -var RootCmd = &cobra.Command{ - Use: "node-agent", - Short: "node-agent is a tool for a node to start the websocket server and client", - Long: `node-agent client connects to the server to execute commands and upload files to the node - node-agent serve starts a websocket server to receive messages and file uploads from clients`, - Run: func(cmd *cobra.Command, args []string) { - _ = cmd.Help() - }, -} - -func initConfig() { - // Tell Viper to automatically look for a .env file - //viper.SetConfigFile("agent.env") - viper.SetConfigFile("/srv/node-agent/agent.env") - //currentDir, _ := os.Getwd() - //viper.AddConfigPath(currentDir) - //viper.AddConfigPath("/srv/node-agent/agent.env") - //viper.SetConfigType("toml") - // If an agent.env file is found, read it in. 
- if err := viper.ReadInConfig(); err != nil { - log.Warnf("Load config file error, %s", err) - } - // set default value from agent.env - if len(user) == 0 { - user = viper.GetString("WEB_USER") - } - if len(password) == 0 { - password = viper.GetString("WEB_PASS") - } -} - -func initWebSocketAddr() { - err := viper.BindPFlag("ADDR", client.ClientCmd.PersistentFlags().Lookup("addr")) - if err != nil { - log.Fatalf("Failed to bind flag: %v", err) - return - } - err = viper.BindEnv("ADDR", "ADDR") - if err != nil { - log.Fatalf("Failed to bind env: %v", err) - return - } - // Initialize addr value from viper - log.Infof(strings.Join(viper.AllKeys(), ",")) - if viper.Get("addr") != nil { - client.WsAddr = viper.GetStringSlice("addr") - log.Infof("addr: %v", client.WsAddr) - } -} - -func init() { - cobra.OnInitialize(initConfig, initWebSocketAddr) - - RootCmd.PersistentFlags().StringVarP(&user, "user", "u", "", "Username for authentication") - RootCmd.PersistentFlags().StringVarP(&password, "password", "p", "", "Password for authentication") - // bind flags to viper - err := viper.BindPFlag("WEB_USER", RootCmd.PersistentFlags().Lookup("user")) - if err != nil { - log.Fatal(err) - } - err = viper.BindPFlag("WEB_PASS", RootCmd.PersistentFlags().Lookup("password")) - if err != nil { - log.Fatal(err) - } - // bind environment variables - err = viper.BindEnv("WEB_USER", "WEB_USER") - if err != nil { - log.Fatal(err) - } - err = viper.BindEnv("WEB_PASS", "WEB_PASS") - if err != nil { - log.Fatal(err) - } - RootCmd.AddCommand(client.ClientCmd) - RootCmd.AddCommand(serve.ServeCmd) -} diff --git a/cmd/kubenest/node-agent/app/serve/serve.go b/cmd/kubenest/node-agent/app/serve/serve.go deleted file mode 100644 index d07debb6d..000000000 --- a/cmd/kubenest/node-agent/app/serve/serve.go +++ /dev/null @@ -1,512 +0,0 @@ -package serve - -import ( - "bufio" - "context" - "crypto/sha256" - "crypto/tls" - "encoding/base64" - "errors" - "fmt" - "net" - "net/http" - "net/url" - "os" - "os/exec" - "os/signal" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/creack/pty" - "github.com/gorilla/websocket" - "github.com/spf13/cobra" - "github.com/spf13/viper" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/clientcmd" - - "github.com/kosmos.io/kosmos/cmd/kubenest/node-agent/app/logger" - "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" -) - -var ( - ServeCmd = &cobra.Command{ - Use: "serve", - Short: "Start a WebSocket server", - RunE: serveCmdRun, - } - - certFile string // SSL certificate file - keyFile string // SSL key file - addr string // server listen address - nodeName string // server nodename - log = logger.GetLogger() -) - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - CheckOrigin: func(r *http.Request) bool { - return true - }, -} // use default options - -const ( - defaultKubeconfig = "/srv/node-agent/kubeconfigpath" -) // kubeconfig for heartbeatCheck - -func init() { - // setup flags - ServeCmd.PersistentFlags().StringVarP(&addr, "addr", "a", ":5678", "websocket service address") - ServeCmd.PersistentFlags().StringVarP(&certFile, "cert", "c", "cert.pem", "SSL certificate file") - ServeCmd.PersistentFlags().StringVarP(&keyFile, "key", "k", "key.pem", "SSL key file") - ServeCmd.PersistentFlags().StringVarP(&nodeName, "nodename", "n", "", "set nodename") -} - -func serveCmdRun(_ *cobra.Command, _ []string) error { - //start heartbeatCheck Goroutine - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - - if len(nodeName) == 0 { - nodeName = viper.GetString("NODE_NAME") - } - go heartbeatCheck(ctx, nodeName) - - user := viper.GetString("WEB_USER") - password := viper.GetString("WEB_PASS") - port := viper.GetString("WEB_PORT") - if len(user) == 0 || len(password) == 0 { - log.Errorf("-user and -password are required") - return errors.New("-user and -password are required") - } - if port != "" { - addr = ":" + port - } - - return Start(addr, certFile, keyFile, user, password) -} - -func heartbeatCheck(ctx context.Context, nodeName string) { - kubeconfigPath := defaultKubeconfig - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - log.Errorf("Failed to load kubeconfig from path %s: %v", kubeconfigPath, err) - return - } - kosmosClient, err := versioned.NewForConfig(config) - if err != nil { - log.Errorf("Failed to create kosmos client: %v", err) - return - } - - ticker := time.NewTicker(10 * time.Second) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - log.Infof("Heartbeat for node %s stopped", nodeName) - return - case <-ticker.C: - node, err := kosmosClient.KosmosV1alpha1().GlobalNodes().Get(ctx, nodeName, metav1.GetOptions{}) - if err != nil { - log.Errorf("Failed to get node: %v", err) - continue - } - heartbeatTime := metav1.Now() - - if len(node.Status.Conditions) == 0 { - log.Infof("GlobalNode %s has no conditions, initializing default condition", node.Name) - node.Status.Conditions = []corev1.NodeCondition{ - { - LastHeartbeatTime: heartbeatTime, - }, - } - } else { - node.Status.Conditions[0].LastHeartbeatTime = heartbeatTime - } - if _, err := kosmosClient.KosmosV1alpha1().GlobalNodes().UpdateStatus(ctx, node, metav1.UpdateOptions{}); err != nil { - log.Errorf("update node %s status for globalnode failed, %v", node.Name, err) - } else { - log.Infof("GlobalnodeHeartbeat: successfully updated global node %s, Status.Conditions: %+v", node.Name, node.Status.Conditions) - } - } - } -} - -// start server -func Start(addr, certFile, keyFile, user, password string) error { - passwordHash := sha256.Sum256([]byte(password)) - - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/healthz" || r.URL.Path == "/readyz" { - w.WriteHeader(http.StatusOK) - return - } - - auth := r.Header.Get("Authorization") - if auth == "" { - http.Error(w, "Unauthorized", http.StatusUnauthorized) - return - } - - userPassBase64 := strings.TrimPrefix(auth, "Basic ") - userPassBytes, err := base64.StdEncoding.DecodeString(userPassBase64) - if err != nil { - http.Error(w, "Unauthorized", http.StatusUnauthorized) - return - } - - userPass := strings.SplitN(string(userPassBytes), ":", 2) - if len(userPass) != 2 { - http.Error(w, "Unauthorized", http.StatusUnauthorized) - return - } - - userHash := sha256.Sum256([]byte(userPass[1])) - if userPass[0] != user || userHash != passwordHash { - http.Error(w, "Unauthorized", http.StatusUnauthorized) - return - } - - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Errorf("http upgrade to websocket failed : %v", err) - return - } - defer conn.Close() - - u, err := url.Parse(r.RequestURI) - if err != nil { - log.Errorf("parse uri: %s, %v", r.RequestURI, err) - return - } - queryParams := u.Query() - - switch { - case strings.HasPrefix(r.URL.Path, "/upload"): - handleUpload(conn, queryParams) - case strings.HasPrefix(r.URL.Path, "/cmd"): - handleCmd(conn, queryParams) - case strings.HasPrefix(r.URL.Path, "/py"): - 
handleScript(conn, queryParams, []string{"python3", "-u"}) - case strings.HasPrefix(r.URL.Path, "/sh"): - handleScript(conn, queryParams, []string{"sh"}) - case strings.HasPrefix(r.URL.Path, "/tty"): - handleTty(conn, queryParams) - case strings.HasPrefix(r.URL.Path, "/check"): - handleCheck(conn, queryParams) - default: - _ = conn.WriteMessage(websocket.TextMessage, []byte("Invalid path")) - } - }) - - log.Infof("Starting server on %s", addr) - tlsConfig := &tls.Config{ - MinVersion: tls.VersionTLS13, - } - tlsConfig.Certificates = make([]tls.Certificate, 1) - tlsConfig.Certificates[0], _ = tls.LoadX509KeyPair(certFile, keyFile) - server := &http.Server{ - Addr: addr, - TLSConfig: tlsConfig, - ReadHeaderTimeout: 10 * time.Second, - } - - err := server.ListenAndServeTLS("", "") - if err != nil { - log.Errorf("failed to start server %v", err) - } - return err -} - -func handleCheck(conn *websocket.Conn, params url.Values) { - port := params.Get("port") - if len(port) == 0 { - log.Errorf("port is required") - return - } - log.Infof("Check port %s", port) - address := fmt.Sprintf(":%s", port) - listener, err := net.Listen("tcp", address) - if err != nil { - log.Infof("port not avalible %s %v", address, err) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%d", 1))) - return - } - defer listener.Close() - log.Infof("port avalible %s", address) - // _ = conn.WriteMessage(websocket.BinaryMessage, []byte("0")) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%d", 0))) -} - -func handleTty(conn *websocket.Conn, queryParams url.Values) { - entrypoint := queryParams.Get("command") - if len(entrypoint) == 0 { - log.Errorf("command is required") - return - } - log.Infof("Executing command %s", entrypoint) - cmd := exec.Command(entrypoint) - ptmx, err := pty.Start(cmd) - if err != nil { - log.Errorf("failed to start command %v", err) - return - } - defer func() { - _ = ptmx.Close() - }() - ch := make(chan os.Signal, 1) - signal.Notify(ch, syscall.SIGWINCH) - go func() { - for range ch { - if err := pty.InheritSize(os.Stdin, ptmx); err != nil { - log.Errorf("error resizing pty: %s", err) - } - } - }() - ch <- syscall.SIGWINCH // Initial resize. - defer func() { signal.Stop(ch); close(ch) }() // Cleanup signals when done. - done := make(chan struct{}) - // Use a goroutine to copy PTY output to WebSocket - go func() { - buf := make([]byte, 1024) - for { - n, err := ptmx.Read(buf) - if err != nil { - log.Errorf("PTY read error: %v", err) - break - } - log.Printf("Received message: %s", buf[:n]) - if err := conn.WriteMessage(websocket.BinaryMessage, buf[:n]); err != nil { - log.Errorf("WebSocket write error: %v", err) - break - } - } - done <- struct{}{} - }() - // echo off - //ptmx.Write([]byte("stty -echo\n")) - // Set stdin in raw mode. - //oldState, err := term.MakeRaw(int(ptmx.Fd())) - //if err != nil { - // panic(err) - //} - //defer func() { _ = term.Restore(int(ptmx.Fd()), oldState) }() // Best effort. 
- - // Disable Bracketed Paste Mode in bash shell - // _, err = ptmx.Write([]byte("printf '\\e[?2004l'\n")) - // if err != nil { - // log.Fatal(err) - // } - - // Use a goroutine to copy WebSocket input to PTY - go func() { - for { - _, message, err := conn.ReadMessage() - if err != nil { - log.Printf("read from websocket failed: %v, %s", err, string(message)) - break - } - log.Printf("Received message: %s", message) // Debugging line - if _, err := ptmx.Write(message); err != nil { // Ensure newline character for commands - log.Printf("PTY write error: %v", err) - break - } - } - // Signal the done channel when this goroutine finishes - done <- struct{}{} - }() - - // Wait for the done channel to be closed - <-done -} - -func handleUpload(conn *websocket.Conn, params url.Values) { - fileName := params.Get("file_name") - filePath := params.Get("file_path") - log.Infof("Uploading file name %s, file path %s", fileName, filePath) - defer conn.Close() - if len(fileName) != 0 && len(filePath) != 0 { - // mkdir - err := os.MkdirAll(filePath, 0775) - if err != nil { - log.Errorf("mkdir: %s %v", filePath, err) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, fmt.Sprintf("failed to make directory: %v", err))) - return - } - file := filepath.Join(filePath, fileName) - // check if the file already exists - if _, err := os.Stat(file); err == nil { - log.Infof("File %s already exists", file) - timestamp := time.Now().Format("2006-01-02-150405000") - bakFilePath := fmt.Sprintf("%s_%s_bak", file, timestamp) - err = os.Rename(file, bakFilePath) - if err != nil { - log.Errorf("failed to rename file: %v", err) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, fmt.Sprintf("failed to rename file: %v", err))) - return - } - } - // create file with append - fp, err := os.OpenFile(file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - log.Errorf("failed to open file: %v", err) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, fmt.Sprintf("failed to open file: %v", err))) - return - } - defer fp.Close() - // receive data from websocket - for { - _, data, err := conn.ReadMessage() - if err != nil { - log.Errorf("failed to read message : %s", err) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, fmt.Sprintf("failed to read message: %v", err))) - return - } - // check if the file end - if string(data) == "EOF" { - log.Infof("finish file data transfer %s", file) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%d", 0))) - return - } - // data to file - _, err = fp.Write(data) - if err != nil { - log.Errorf("failed to write data to file : %s", err) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, fmt.Sprintf("failed write data to file: %v", err))) - return - } - } - } - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, "Invalid file_name or file_path")) -} - -/* -0 → success -non-zero → failure -Exit code 1 indicates a general failure -Exit code 2 indicates incorrect use of shell builtins -Exit codes 3-124 indicate some error in job (check software exit codes) -Exit code 125 indicates out of memory -Exit code 126 indicates command cannot execute -Exit code 127 
indicates command not found -Exit code 128 indicates invalid argument to exit -Exit codes 129-192 indicate jobs terminated by Linux signals -For these, subtract 128 from the number and match to signal code -Enter kill -l to list signal codes -Enter man signal for more information -*/ -func handleCmd(conn *websocket.Conn, params url.Values) { - command := params.Get("command") - args := params["args"] - // if the command is file, the file should have execute permission - if command == "" { - log.Warnf("No command specified %v", params) - _ = conn.WriteMessage(websocket.TextMessage, []byte("No command specified")) - return - } - execCmd(conn, command, args) -} - -func handleScript(conn *websocket.Conn, params url.Values, command []string) { - defer conn.Close() - args := params["args"] - if len(args) == 0 { - _ = conn.WriteMessage(websocket.TextMessage, []byte("No command specified")) - } - // Write data to a temporary file - tempFile, err := os.CreateTemp("", "script_*") - if err != nil { - log.Errorf("Error creating temporary file: %v", err) - return - } - defer os.Remove(tempFile.Name()) // Clean up temporary file - defer tempFile.Close() - tempFilefp, err := os.OpenFile(tempFile.Name(), os.O_APPEND|os.O_WRONLY, 0644) - if err != nil { - log.Errorf("Error opening temporary file: %v", err) - } - for { - // Read message from WebSocket client - _, data, err := conn.ReadMessage() - if err != nil { - log.Errorf("failed to read message : %s", err) - break - } - if string(data) == "EOF" { - log.Infof("finish file data transfer %s", tempFile.Name()) - break - } - - // Write received data to the temporary file - if _, err := tempFilefp.Write(data); err != nil { - log.Errorf("Error writing data to temporary file: %v", err) - continue - } - } - executeCmd := append(command, tempFile.Name()) - executeCmd = append(executeCmd, args...) - // Execute the Python script - execCmd(conn, executeCmd[0], executeCmd[1:]) -} - -func execCmd(conn *websocket.Conn, command string, args []string) { - // #nosec G204 - cmd := exec.Command(command, args...) 
- log.Infof("Executing command: %s, %v", command, args) - stdout, err := cmd.StdoutPipe() - if err != nil { - log.Warnf("Error obtaining command output pipe: %v", err) - } - defer stdout.Close() - - stderr, err := cmd.StderrPipe() - if err != nil { - log.Warnf("Error obtaining command error pipe: %v", err) - } - defer stderr.Close() - - // Channel for signaling command completion - doneCh := make(chan struct{}) - defer close(doneCh) - // processOutput - go func() { - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - data := scanner.Bytes() - log.Warnf("%s", data) - _ = conn.WriteMessage(websocket.TextMessage, data) - } - scanner = bufio.NewScanner(stderr) - for scanner.Scan() { - data := scanner.Bytes() - log.Warnf("%s", data) - _ = conn.WriteMessage(websocket.TextMessage, data) - } - doneCh <- struct{}{} - }() - if err := cmd.Start(); err != nil { - errStr := strings.ToLower(err.Error()) - log.Warnf("Error starting command: %v, %s", err, errStr) - _ = conn.WriteMessage(websocket.TextMessage, []byte(errStr)) - if strings.Contains(errStr, "no such file") { - exitCode := 127 - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%d", exitCode))) - } - } - - // Wait for the command to finish - if err := cmd.Wait(); err != nil { - var exitError *exec.ExitError - if errors.As(err, &exitError) { - log.Warnf("Command : %s exited with non-zero status: %v", command, exitError) - } - } - <-doneCh - exitCode := cmd.ProcessState.ExitCode() - log.Infof("Command : %s finished with exit code %d", command, exitCode) - _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%d", exitCode))) -} diff --git a/cmd/kubenest/node-agent/main.go b/cmd/kubenest/node-agent/main.go deleted file mode 100644 index e2eca2fb0..000000000 --- a/cmd/kubenest/node-agent/main.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import ( - "log" - - "github.com/kosmos.io/kosmos/cmd/kubenest/node-agent/app" -) - -func main() { - if err := app.RootCmd.Execute(); err != nil { - log.Fatal(err) - } -} diff --git a/cmd/kubenest/operator/OWNERS b/cmd/kubenest/operator/OWNERS deleted file mode 100644 index 63c9eb2aa..000000000 --- a/cmd/kubenest/operator/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -approvers: - - duanmengkk - - GreatLazyMan - - yuleichun-striving -reviewers: - - GreatLazyMan - - yuleichun-striving diff --git a/cmd/kubenest/operator/app/config/config.go b/cmd/kubenest/operator/app/config/config.go deleted file mode 100644 index 0d06e06f6..000000000 --- a/cmd/kubenest/operator/app/config/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package config - -import ( - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - componentbaseconfig "k8s.io/component-base/config" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" -) - -// Config has all the configurations for kubenest. -type Config struct { - KubeNestOptions v1alpha1.KubeNestConfiguration - Client clientset.Interface - RestConfig *restclient.Config - KubeconfigStream []byte - // LeaderElection is optional. 
- LeaderElection componentbaseconfig.LeaderElectionConfiguration -} diff --git a/cmd/kubenest/operator/app/operator.go b/cmd/kubenest/operator/app/operator.go deleted file mode 100644 index 267f8eb51..000000000 --- a/cmd/kubenest/operator/app/operator.go +++ /dev/null @@ -1,330 +0,0 @@ -package app - -import ( - "context" - "fmt" - "os" - - "github.com/spf13/cobra" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/client-go/kubernetes" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - cliflag "k8s.io/component-base/cli/flag" - "k8s.io/klog/v2" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/kosmos.io/kosmos/cmd/kubenest/operator/app/config" - "github.com/kosmos.io/kosmos/cmd/kubenest/operator/app/options" - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/controller" - endpointscontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/endpoints.sync.controller" - glnodecontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/global.node.controller" - kosmos "github.com/kosmos.io/kosmos/pkg/kubenest/controller/kosmos" - vcnodecontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller" - "github.com/kosmos.io/kosmos/pkg/scheme" - "github.com/kosmos.io/kosmos/pkg/sharedcli/klogflag" -) - -func NewVirtualClusterOperatorCommand(ctx context.Context) *cobra.Command { - opts := options.NewOptions() - - cmd := &cobra.Command{ - Use: "virtual-cluster-operator", - Long: `create virtual kubernetes control plane with VirtualCluster`, - RunE: func(cmd *cobra.Command, args []string) error { - return runCommand(ctx, opts) - }, - } - - fss := cliflag.NamedFlagSets{} - - genericFlagSet := fss.FlagSet("generic") - opts.AddFlags(genericFlagSet) - - logsFlagSet := fss.FlagSet("logs") - klogflag.Add(logsFlagSet) - - cmd.Flags().AddFlagSet(genericFlagSet) - cmd.Flags().AddFlagSet(logsFlagSet) - - return cmd -} - -func runCommand(ctx context.Context, opts *options.Options) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - kc, err := SetupConfig(opts) - if err != nil { - return err - } - return run(ctx, kc) -} - -func SetupConfig(opts *options.Options) (*config.Config, error) { - c := &config.Config{} - - var koc v1alpha1.KubeNestConfiguration - if len(opts.ConfigFile) != 0 { - ko, err := loadConfig(opts.ConfigFile) - if err != nil { - return nil, err - } - koc = *ko - } else { - ko := &v1alpha1.KubeNestConfiguration{} - ko.KubeNestType = v1alpha1.KubeInKube - ko.KosmosKubeConfig.AllowNodeOwnbyMulticluster = false - ko.KubeInKubeConfig.ForceDestroy = opts.DeprecatedOptions.KubeInKubeConfig.ForceDestroy - ko.KubeInKubeConfig.ETCDUnitSize = opts.DeprecatedOptions.KubeInKubeConfig.ETCDUnitSize - ko.KubeInKubeConfig.ETCDStorageClass = opts.DeprecatedOptions.KubeInKubeConfig.ETCDStorageClass - ko.KubeInKubeConfig.AdmissionPlugins = opts.DeprecatedOptions.KubeInKubeConfig.AdmissionPlugins - ko.KubeInKubeConfig.AnpMode = opts.DeprecatedOptions.KubeInKubeConfig.AnpMode - ko.KubeInKubeConfig.APIServerReplicas = opts.DeprecatedOptions.KubeInKubeConfig.APIServerReplicas - ko.KubeInKubeConfig.ClusterCIDR = 
opts.DeprecatedOptions.KubeInKubeConfig.ClusterCIDR - - koc = *ko - } - - fillInForDefault(c, koc) - printKubeNestConfiguration(koc) - - kubeconfigStream, err := os.ReadFile(opts.KubernetesOptions.KubeConfig) - if err != nil { - return nil, fmt.Errorf("read kubeconfig file failed: %v", err) - } - - // Prepare kube config. - kubeConfig, err := createKubeConfig(opts) - if err != nil { - return nil, err - } - - // Prepare kube clients. - client, err := createClients(kubeConfig) - if err != nil { - return nil, err - } - - c.KubeconfigStream = kubeconfigStream - c.RestConfig = kubeConfig - c.Client = client - c.LeaderElection = opts.LeaderElection - c.KubeNestOptions = koc - - return c, nil -} - -// TODO -func printKubeNestConfiguration(_ v1alpha1.KubeNestConfiguration) { - -} - -// TODO -func fillInForDefault(_ *config.Config, _ v1alpha1.KubeNestConfiguration) { - -} - -func loadConfig(file string) (*v1alpha1.KubeNestConfiguration, error) { - data, err := os.ReadFile(file) - if err != nil { - return nil, err - } - // The UniversalDecoder runs defaulting and returns the internal type by default. - obj, gvk, err := scheme.Codecs.UniversalDecoder().Decode(data, nil, nil) - if err != nil { - return nil, err - } - if cfgObj, ok := obj.(*v1alpha1.KubeNestConfiguration); ok { - return cfgObj, nil - } - return nil, fmt.Errorf("couldn't decode as KubeNestConfiguration, got %s: ", gvk) -} - -// createClients creates a kube client and an event client from the given kubeConfig -func createClients(kubeConfig *restclient.Config) (clientset.Interface, error) { - client, err := clientset.NewForConfig(kubeConfig) - if err != nil { - return nil, err - } - - return client, nil -} - -// createKubeConfig creates a kubeConfig from the given config and masterOverride. -func createKubeConfig(opts *options.Options) (*restclient.Config, error) { - if len(opts.KubernetesOptions.KubeConfig) == 0 && len(opts.KubernetesOptions.Master) == 0 { - klog.Warning("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.") - } - - // This creates a client, first loading any specified kubeconfig - // file, and then overriding the Master flag, if non-empty. 
- kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: opts.KubernetesOptions.KubeConfig}, - &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: opts.KubernetesOptions.Master}}).ClientConfig() - if err != nil { - return nil, err - } - - kubeConfig.DisableCompression = true - kubeConfig.QPS = opts.KubernetesOptions.QPS - kubeConfig.Burst = opts.KubernetesOptions.Burst - - return kubeConfig, nil -} - -func startEndPointsControllers(mgr manager.Manager) error { - restConfig := mgr.GetConfig() - - kubeClient, err := kubernetes.NewForConfig(restConfig) - if err != nil { - return err - } - - coreEndPointsController := endpointscontroller.CoreDNSController{ - Client: mgr.GetClient(), - EventRecorder: mgr.GetEventRecorderFor(constants.GlobalNodeControllerName), - } - - if err := coreEndPointsController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", endpointscontroller.CoreDNSSyncControllerName, err) - } - - KonnectivityEndPointsController := endpointscontroller.KonnectivityController{ - Client: mgr.GetClient(), - EventRecorder: mgr.GetEventRecorderFor(constants.GlobalNodeControllerName), - } - - if err := KonnectivityEndPointsController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", endpointscontroller.KonnectivitySyncControllerName, err) - } - - nodeGetter := &endpointscontroller.RealNodeGetter{} - APIServerExternalSyncController := endpointscontroller.APIServerExternalSyncController{ - Client: mgr.GetClient(), - EventRecorder: mgr.GetEventRecorderFor(constants.GlobalNodeControllerName), - KubeClient: kubeClient, - NodeGetter: nodeGetter, - } - - if err := APIServerExternalSyncController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", endpointscontroller.APIServerExternalSyncControllerName, err) - } - - return nil -} - -func run(ctx context.Context, config *config.Config) error { - newscheme := scheme.NewSchema() - err := apiextensionsv1.AddToScheme(newscheme) - if err != nil { - panic(err) - } - - mgr, err := controllerruntime.NewManager(config.RestConfig, controllerruntime.Options{ - Logger: klog.Background(), - Scheme: newscheme, - LeaderElection: config.LeaderElection.LeaderElect, - LeaderElectionID: config.LeaderElection.ResourceName, - LeaderElectionNamespace: config.LeaderElection.ResourceNamespace, - LivenessEndpointName: "/healthz", - ReadinessEndpointName: "/readyz", - HealthProbeBindAddress: ":8081", - }) - if err != nil { - return fmt.Errorf("failed to build controller manager: %v", err) - } - - err = mgr.AddHealthzCheck("healthz", healthz.Ping) - if err != nil { - return fmt.Errorf("failed to build healthz: %v", err) - } - - err = mgr.AddReadyzCheck("readyz", healthz.Ping) - if err != nil { - return fmt.Errorf("failed to build readyz: %v", err) - } - - hostKubeClient, err := kubernetes.NewForConfig(config.RestConfig) - if err != nil { - return fmt.Errorf("could not create clientset: %v", err) - } - - kosmosClient, err := versioned.NewForConfig(config.RestConfig) - if err != nil { - return fmt.Errorf("could not create clientset: %v", err) - } - - VirtualClusterInitController := controller.VirtualClusterInitController{ - Client: mgr.GetClient(), - Config: mgr.GetConfig(), - EventRecorder: mgr.GetEventRecorderFor(constants.InitControllerName), - RootClientSet: hostKubeClient, - KosmosClient: kosmosClient, - KubeNestOptions: &config.KubeNestOptions, - } - if err = 
VirtualClusterInitController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", constants.InitControllerName, err) - } - - GlobalNodeController := glnodecontroller.GlobalNodeController{ - Client: mgr.GetClient(), - RootClientSet: hostKubeClient, - KosmosClient: kosmosClient, - EventRecorder: mgr.GetEventRecorderFor(constants.GlobalNodeControllerName), - } - - if err = GlobalNodeController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", constants.GlobalNodeControllerName, err) - } - - GlobalNodeConditionController := glnodecontroller.NewGlobalNodeStatusController( - mgr.GetClient(), - kosmosClient, - ) - if err := mgr.Add(GlobalNodeConditionController); err != nil { - return fmt.Errorf("error starting %s: %v", glnodecontroller.GlobalNodeStatusControllerName, err) - } - - if err := startEndPointsControllers(mgr); err != nil { - return err - } - - VirtualClusterNodeController := vcnodecontroller.NewNodeController( - mgr.GetClient(), - hostKubeClient, - mgr.GetEventRecorderFor(constants.NodeControllerName), - kosmosClient, - &config.KubeNestOptions, - ) - - if err = VirtualClusterNodeController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", constants.NodeControllerName, err) - } - - if config.KubeNestOptions.KubeNestType == v1alpha1.KosmosKube { - KosmosJoinController := kosmos.KosmosJoinController{ - Client: mgr.GetClient(), - EventRecorder: mgr.GetEventRecorderFor(constants.KosmosJoinControllerName), - KubeConfig: config.RestConfig, - KubeconfigStream: config.KubeconfigStream, - AllowNodeOwnbyMulticluster: config.KubeNestOptions.KosmosKubeConfig.AllowNodeOwnbyMulticluster, - } - if err = KosmosJoinController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", constants.KosmosJoinControllerName, err) - } - } - - if err := mgr.Start(ctx); err != nil { - return fmt.Errorf("failed to start controller manager: %v", err) - } - - return nil -} diff --git a/cmd/kubenest/operator/app/options/options.go b/cmd/kubenest/operator/app/options/options.go deleted file mode 100644 index 83fe89bec..000000000 --- a/cmd/kubenest/operator/app/options/options.go +++ /dev/null @@ -1,73 +0,0 @@ -package options - -import ( - "github.com/spf13/pflag" - "k8s.io/client-go/tools/leaderelection/resourcelock" - componentbaseconfig "k8s.io/component-base/config" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -type Options struct { - LeaderElection componentbaseconfig.LeaderElectionConfiguration - KubernetesOptions KubernetesOptions - DeprecatedOptions v1alpha1.KubeNestConfiguration - AllowNodeOwnbyMulticluster bool - KosmosJoinController bool - - // ConfigFile is the location of the kubenest's configuration file. 
ConfigFile string -} - -type KubernetesOptions struct { - KubeConfig string - Master string - QPS float32 - Burst int -} - -type KubeNestOptions struct { - ForceDestroy bool - AnpMode string - AdmissionPlugins bool - APIServerReplicas int - ClusterCIDR string - ETCDStorageClass string - ETCDUnitSize string -} - -func NewOptions() *Options { - return &Options{ - LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: true, - ResourceLock: resourcelock.LeasesResourceLock, - ResourceNamespace: utils.DefaultNamespace, - ResourceName: "virtual-cluster-controller", - }, - } -} - -func (o *Options) AddFlags(flags *pflag.FlagSet) { - if o == nil { - return - } - - flags.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.") - flags.StringVar(&o.LeaderElection.ResourceName, "leader-elect-resource-name", "operator", "The name of resource object that is used for locking during leader election.") - flags.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", utils.DefaultNamespace, "The namespace of resource object that is used for locking during leader election.") - flags.Float32Var(&o.KubernetesOptions.QPS, "kube-qps", 40.0, "QPS to use while talking with kube-apiserver.") - flags.IntVar(&o.KubernetesOptions.Burst, "kube-burst", 60, "Burst to use while talking with kube-apiserver.") - flags.StringVar(&o.KubernetesOptions.KubeConfig, "kubeconfig", "", "Path to the kubernetes kubeconfig file; if left blank, the in-cluster config is used.") - flags.StringVar(&o.KubernetesOptions.Master, "master", "", "Used to generate the kubeconfig for downloading; if not specified, the host in the kubeconfig is used.") - flags.BoolVar(&o.AllowNodeOwnbyMulticluster, "multiowner", false, "Allow a node to be owned by multiple clusters or not.") - flags.BoolVar(&o.KosmosJoinController, "kosmos-join-controller", false, "Turn on or off kosmos-join-controller.") - flags.BoolVar(&o.DeprecatedOptions.KubeInKubeConfig.ForceDestroy, "kube-nest-force-destroy", false, "Force destroy the node. If set to true, Kubernetes will not evict the existing pods on the node when joining it to the tenant's control plane, but will force destroy it instead.") - flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.AnpMode, "kube-nest-anp-mode", "tcp", "kube-apiserver network proxy mode, must be set to tcp or uds. In uds mode the apiserver replicas should be one; tcp supports multiple apiserver replicas.") - flags.BoolVar(&o.DeprecatedOptions.KubeInKubeConfig.AdmissionPlugins, "kube-nest-admission-plugins", false, "kube-apiserver disable-admission-plugins; false keeps --disable-admission-plugins=License, true removes the --disable-admission-plugins=License flag.") - flags.IntVar(&o.DeprecatedOptions.KubeInKubeConfig.APIServerReplicas, "kube-nest-apiserver-replicas", 1, "virtual-cluster kube-apiserver replicas, default is 1.") - flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.ClusterCIDR, "cluster-cidr", "10.244.0.0/16", "Used to set the cluster-cidr of kube-controller-manager and kube-proxy (configmap)") - flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.ETCDStorageClass, "etcd-storage-class", "openebs-hostpath", "Used to set the etcd storage class.") - flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.ETCDUnitSize, "etcd-unit-size", "1Gi", "Used to set the etcd unit size, each node is allocated storage of etcd-unit-size.") - flags.StringVar(&o.ConfigFile, "config", "", "The path to the configuration file.") -} diff --git a/cmd/kubenest/operator/main.go b/cmd/kubenest/operator/main.go deleted file mode 100644 index 090b575d0..000000000 --- a/cmd/kubenest/operator/main.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "os" - - "k8s.io/component-base/cli" - ctrl "sigs.k8s.io/controller-runtime" - - "github.com/kosmos.io/kosmos/cmd/kubenest/operator/app" -) - -func main() { - ctx := ctrl.SetupSignalHandler() - cmd := app.NewVirtualClusterOperatorCommand(ctx) - code := cli.Run(cmd) - os.Exit(code) -} diff --git a/deploy/crds/kosmos.io_globalnodes.yaml b/deploy/crds/kosmos.io_globalnodes.yaml index 3a255d305..4d9ace62f 100644 --- a/deploy/crds/kosmos.io_globalnodes.yaml +++ b/deploy/crds/kosmos.io_globalnodes.yaml @@ -20,7 +20,7 @@ spec: name: NODE_IP type: string - jsonPath: .status.conditions[0].type - name: TYPE + name: Type type: string - jsonPath: .spec.state name: STATE diff --git a/hack/generate_globalnode.sh b/hack/generate_globalnode.sh deleted file mode 100755 index b8020f8b9..000000000 --- a/hack/generate_globalnode.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -if [ -z "$KUBECONFIG" ]; then - echo "KUBECONFIG environment variable is not set." - exit 1 -fi - -# Creating a directory for logs -mkdir -p kube_apply_logs - -nodes=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}') -for node in ${nodes}; do - nodeIP=$(kubectl get node ${node} -o jsonpath='{.status.addresses[0].address}') - labels=$(kubectl get node ${node} -o jsonpath='{.metadata.labels}') - - # Use jq to ensure all values are strings, but also explicitly add quotes in the YAML formatting step below - labelsFormatted=$(echo "$labels" | jq -r 'to_entries | map(.value |= tostring) | .[] | " \(.key): \"\(.value)\""') - - yamlContent=" -apiVersion: kosmos.io/v1alpha1 -kind: GlobalNode -metadata: - name: ${node} -spec: - state: \"reserved\" - nodeIP: \"${nodeIP}\" - labels: -$(echo "${labelsFormatted}" | awk '{print " " $0}') -" - - # Log the YAML content to a file for inspection - echo "$yamlContent" > kube_apply_logs/${node}.yaml - - # Apply the YAML - echo "$yamlContent" | kubectl apply -f - - - -done -# clear resources -rm -rf kube_apply_logs \ No newline at end of file diff --git a/hack/k8s-in-k8s/generate_env.sh b/hack/k8s-in-k8s/generate_env.sh deleted file mode 100644 index b55a9969e..000000000 --- a/hack/k8s-in-k8s/generate_env.sh +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/env bash - -# This script will generate a g.env.sh file, like the following: -# #!/usr/bin/env bash - -# # ##### -# # Generate by script generate_env.sh -# # ##### - -# SCRIPT_VERSION=0.0.1 -# # tmp dir of kosmos -# PATH_FILE_TMP=/apps/conf/kosmos/tmp -# ################################################## -# # path for kubeadm config -# PATH_KUBEADM_CONFIG=/etc/kubeadm -# ################################################## -# # path for kubernetes, from kubelet args --config -# PATH_KUBERNETES=/etc/kubernetes -# 
PATH_KUBERNETES_PKI=/etc/kubernetes/pki -# # name for kubelet kubeconfig file -# KUBELET_KUBE_CONFIG_NAME=kubelet.conf -# ################################################## -# # path for kubelet -# PATH_KUBELET_LIB=/var/lib/kubelet -# # path for kubelet -# PATH_KUBELET_CONF=/var/lib/kubelet -# # name for config file of kubelet -# KUBELET_CONFIG_NAME=config.yaml - -# function GenerateKubeadmConfig() { -# echo "--- -# apiVersion: kubeadm.k8s.io/v1beta2 -# discovery: -# bootstrapToken: -# apiServerEndpoint: apiserver.cluster.local:6443 -# token: $1 -# unsafeSkipCAVerification: true -# kind: JoinConfiguration -# nodeRegistration: -# criSocket: /run/containerd/containerd.sock -# kubeletExtraArgs: -# container-runtime: remote -# container-runtime-endpoint: unix:///run/containerd/containerd.sock -# taints: null" > $2/kubeadm.cfg.current -# } - - - - -SCRIPT_VERSION=0.0.1 -# save tmp file -PATH_FILE_TMP=/apps/conf/kosmos/tmp -# path for kubeadm config -PATH_KUBEADM_CONFIG=/etc/kubeadm -# path for kubelet lib -PATH_KUBELET_LIB=/var/lib/kubelet - - -function GetKubeletConfigFilePath() { - systemctl status kubelet | grep -o '\--config=[^ ]*' | awk -F= '{print $2}' -} - -function GetKubeletKubeConfigFilePath() { - systemctl status kubelet | grep -o '\--kubeconfig=[^ ]*' | awk -F= '{print $2}' -} - -function GetKubernetesCaPath() { - kubectl get cm kubelet-config -nkube-system -oyaml | awk '/clientCAFile:/{print $2}' -} - -function GetKubeDnsClusterIP() { - kubectl get svc -nkube-system kube-dns -o jsonpath='{.spec.clusterIP}' -} - -function GetFileName() { - local fullpath="$1" - local filename=$(basename "$fullpath") - echo "$filename" -} - -function GetDirectory() { - local fullpath="$1" - if [ -z "$fullpath" ]; then - echo "Error: No directory found." - exit 1 - fi - local directory=$(dirname "$fullpath") - echo "$directory" -} - -function GetMasterNodeIPs() { - kubectl get nodes -l node-role.kubernetes.io/$1="" -o jsonpath='{range .items[*]}{.status.addresses[?(@.type=="InternalIP")].address}{" "}{end}' -} - -# kubelet config name -KUBELET_CONFIG_NAME=$(GetFileName "$(GetKubeletConfigFilePath)") -# path for kubelet -PATH_KUBELET_CONF=$(GetDirectory "$(GetKubeletConfigFilePath)") -# kubelet kubeconfig file name -KUBELET_KUBE_CONFIG_NAME=$(GetFileName "$(GetKubeletKubeConfigFilePath)") - -# ca.crt path -PATH_KUBERNETES_PKI=$(GetDirectory "$(GetKubernetesCaPath)") -# length=${#PATH_KUBERNETES_PKI} -PATH_KUBERNETES=$(GetDirectory $PATH_KUBERNETES_PKI) -HOST_CORE_DNS=$(GetKubeDnsClusterIP) - -DOCKER_IMAGE_NGINX="registry.paas/cmss/nginx:1.21.4" -DOCKER_IMAGE_LVSCARE="registry.paas/cmss/lvscare:1.0.0" - -master_labels=("master" "control-plane") - -for mlabel in "${master_labels[@]}"; do - SERVERS=$(GetMasterNodeIPs $mlabel) - if [ -z "$SERVERS" ]; then - echo "Warning: No master nodes labeled $mlabel." - else - break - fi -done -if [ -z "$SERVERS" ]; then - echo "Error: No master nodes found or failed to retrieve node IPs." 
- exit 1 -fi - -LOCAL_PORT="6443" -LOCAL_IP="127.0.0.1" # [::1] -CRI_SOCKET=$(ps -aux | grep kubelet | grep -- '--container-runtime-endpoint' | awk -F'--container-runtime-endpoint=' '{print $2}' | awk '{print $1}' | sed 's/^unix:\/\///') - -echo "#!/usr/bin/env bash - -# ##### -# Generate by script generate_env.sh -# ##### - -SCRIPT_VERSION=$SCRIPT_VERSION -# tmp dir of kosmos -PATH_FILE_TMP=$PATH_FILE_TMP -################################################## -# path for kubeadm config -PATH_KUBEADM_CONFIG=$PATH_KUBEADM_CONFIG -################################################## -# path for kubernetes, from kubelet args --config -PATH_KUBERNETES=$PATH_KUBERNETES -PATH_KUBERNETES_PKI=$PATH_KUBERNETES_PKI -# name for kubelet kubeconfig file -KUBELET_KUBE_CONFIG_NAME=$KUBELET_KUBE_CONFIG_NAME -################################################## -# path for kubelet -PATH_KUBELET_LIB=$PATH_KUBELET_LIB -# path for kubelet -PATH_KUBELET_CONF=$PATH_KUBELET_CONF -# name for config file of kubelet -KUBELET_CONFIG_NAME=$KUBELET_CONFIG_NAME -HOST_CORE_DNS=$HOST_CORE_DNS -# Generate kubelet.conf TIMEOUT -KUBELET_CONF_TIMEOUT=30 - -# load balance -DOCKER_IMAGE_NGINX=$DOCKER_IMAGE_NGINX -DOCKER_IMAGE_LVSCARE=$DOCKER_IMAGE_LVSCARE -SERVERS=($SERVERS) - -# Proxy Configuration Options -# Specify the proxy server to be used for traffic management or load balancing. -# Available options for USE_PROXY: -# - "NGINX" : Use NGINX as the proxy server. -# - "LVSCARE" : Use LVSCARE for load balancing (based on IPVS). -# - "NONE" : No proxy server will be used. -# Note: When USE_PROXY is set to "NONE", no proxy service will be configured. -USE_PROXY="LVSCARE" # Current proxy setting: LVSCARE for load balancing. - -# Proxy Service Port Configuration -# LOCAL_PORT specifies the port on which the proxy service listens. -# Example: -# - For Kubernetes setups, this is typically the API server port. -LOCAL_PORT="6443" # Proxy service listening port (default: 6443 for Kubernetes API). - -# Proxy Address Configuration -# LOCAL_IP specifies the address of the proxy service. -# - When USE_PROXY is set to "NGINX": -# - Use LOCAL_IP="127.0.0.1" (IPv4) or LOCAL_IP="[::1]" (IPv6 loopback). -# - When USE_PROXY is set to "LVSCARE": -# - Use LOCAL_IP as the VIP (e.g., "192.0.0.2") for LVSCARE load balancing. -# - Ensure this address is added to the "excludeCIDRs" list in the kube-proxy configuration file -# to avoid routing conflicts. -LOCAL_IP="192.0.0.2" # LVSCARE setup: Proxy address and VIP for load balancing. 
- - -CRI_SOCKET=$CRI_SOCKET - -function GenerateKubeadmConfig() { - echo \"--- -apiVersion: kubeadm.k8s.io/v1beta2 -caCertPath: $PATH_KUBERNETES_PKI/ca.crt -discovery: - bootstrapToken: - apiServerEndpoint: apiserver.cluster.local:6443 - token: \$1 - unsafeSkipCAVerification: true -kind: JoinConfiguration -nodeRegistration: - criSocket: $CRI_SOCKET - kubeletExtraArgs: - container-runtime: remote - container-runtime-endpoint: unix://$CRI_SOCKET - taints: null\" > \$2/kubeadm.cfg.current -} - -function GenerateStaticNginxProxy() { - config_path=/apps/conf/nginx - if [ "\$1" == \"true\" ]; then - config_path=\$PATH_FILE_TMP - fi - echo \"apiVersion: v1 -kind: Pod -metadata: - creationTimestamp: null - name: nginx-proxy - namespace: kube-system -spec: - containers: - - image: \$DOCKER_IMAGE_NGINX - imagePullPolicy: IfNotPresent - name: nginx-proxy - resources: - limits: - cpu: 300m - memory: 512M - requests: - cpu: 25m - memory: 32M - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/nginx - name: etc-nginx - readOnly: true - hostNetwork: true - priorityClassName: system-node-critical - volumes: - - hostPath: - path: \$config_path - type: - name: etc-nginx -status: {}\" > $PATH_KUBERNETES/manifests/nginx-proxy.yaml -} - -" > g.env.sh - - -cat g.env.sh \ No newline at end of file diff --git a/hack/k8s-in-k8s/globalnodes_helper.sh b/hack/k8s-in-k8s/globalnodes_helper.sh deleted file mode 100644 index b349bdc6c..000000000 --- a/hack/k8s-in-k8s/globalnodes_helper.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash - -filename="nodes.txt" -readarray -t globalnodes < "$filename" - -function updateState() { - local nodename="$1" - local state="$2" - kubectl patch globalnodes $nodename -p '{"spec": {"state": "'$state'"}}' --type=merge -} - -function updateNodeState() { - local nodename="$1" - local state="$2" - kubectl patch node $nodename -p '{"metadata": {"labels": {"kosmos-io/state": "'$state'"}}}' -} - -function uncordon() { - local nodename="$1" - kubectl uncordon $nodename - kubectl taint nodes $nodename node.kosmos.io/unschedulable- -} - - -# Update the state of the global nodes -function free_globalnodes() { - local globalnode="$1" - updateState "$globalnode" "free" - updateNodeState "$globalnode" "free" -} - - - -# Update the state of the global nodes -function reserved_globalnodes() { - local globalnode="$1" - updateState "$globalnode" "reserved" - updateNodeState "$globalnode" "reserved" - uncordon "$globalnode" -} - - -# Function to display progress bar -show_progress() { - local progress=$1 - local total=$2 - local width=$3 - - # Calculate percentage - local percent=$((progress * 100 / total)) - local num_hashes=$((percent * width / 100)) - - # Generate progress bar - local bar="[" - for ((i = 0; i < width; i++)); do - if ((i < num_hashes)); then - bar+="#" - else - bar+=" " - fi - done - bar+="]" - - # Print progress bar with percentage - printf "\rProgress: %s %d%%" "$bar" "$percent" -} - -# Total steps for the task -total_steps=${#globalnodes[@]} -# Width of the progress bar -bar_width=50 - -function free() { - # Simulate a task by looping through steps - for ((step = 1; step <= total_steps; step++)); do - # Simulate work with sleep - index=$((step - 1)) - free_globalnodes ${globalnodes[index]} - - # Update progress bar - show_progress $step $total_steps $bar_width - done - - # Print a new line after the progress bar completes - echo -} - -function reserved() { - # Simulate a task by looping through steps - for ((step = 1; step <= total_steps; step++)); do - # 
Simulate work with sleep - index=$((step - 1)) - reserved_globalnodes ${globalnodes[index]} - - # Update progress bar - show_progress $step $total_steps $bar_width - done - - # Print a new line after the progress bar completes - echo -} - - -# See how we were called. -case "$1" in - free) - free - ;; - reserved) - reserved - ;; - *) - echo $"usage: $0 free|reserved" - exit 1 -esac \ No newline at end of file diff --git a/hack/k8s-in-k8s/kubelet_node_helper.sh b/hack/k8s-in-k8s/kubelet_node_helper.sh deleted file mode 100755 index f6e3cb44b..000000000 --- a/hack/k8s-in-k8s/kubelet_node_helper.sh +++ /dev/null @@ -1,715 +0,0 @@ -#!/usr/bin/env bash - -source "env.sh" - -# args -DNS_ADDRESS=${2:-10.237.0.10} -LOG_NAME=${2:-kubelet} -JOIN_HOST=$2 -JOIN_TOKEN=$3 -JOIN_CA_HASH=$4 -NODE_LOCAL_DNS_ADDRESS=$3 - - -function cri_runtime_clean() { - criSocket=unix://$CRI_SOCKET - containers=($(crictl -r $criSocket pods -q)) - - if [ ${#containers[@]} -eq 0 ]; then - echo "No containers found in containerd" - return 0 - fi - - for container in "${containers[@]}"; do - echo "Stopping container: $container" - crictl -r $criSocket stopp "$container" - echo "Removing container: $container" - crictl -r $criSocket rmp "$container" - done -} - - -function docker_runtime_clean() { - containers=($(docker ps -a --filter name=k8s_ -q)) - - if [ ${#containers[@]} -eq 0 ]; then - echo "No containers found matching the filter 'k8s_'" - return 0 - fi - - for container in "${containers[@]}"; do - echo "Stopping container: $container" - docker stop "$container" - echo "Removing container: $container" - docker rm "$container" - done - -} - -# Function to unmount all directories under a given directory -function unmount_kubelet_directory() { - kubelet_dir="$1" - - if [ -z "$kubelet_dir" ]; then - echo "Error: kubelet directory not specified." - exit 1 - fi - - # Ensure the directory has a trailing slash - if [[ "$kubelet_dir" != */ ]]; then - kubelet_dir="${kubelet_dir}/" - fi - - - mounts=($(awk -v dir="$kubelet_dir" '$0 ~ dir {print $2}' /proc/mounts)) - - for mount in "${mounts[@]}"; do - echo "Unmounting $mount..." - if ! umount "$mount"; then - echo "Warning: Failed to unmount $mount" >&2 - fi - done -} - -function clean_dirs() { - files_to_delete=( - "${PATH_KUBELET_LIB}/*" - "${PATH_KUBERNETES}/manifests/*" - "${PATH_KUBERNETES_PKI}/*" - "${PATH_KUBERNETES}/admin.conf" - "${PATH_KUBERNETES}/kubelet.conf" - "${PATH_KUBERNETES}/bootstrap-kubelet.conf" - "${PATH_KUBERNETES}/controller-manager.conf" - "${PATH_KUBERNETES}/scheduler.conf" - "/var/lib/dockershim" - "/var/run/kubernetes" - "/var/lib/cni" - ) - for file in "${files_to_delete[@]}"; do - echo "Deleting file: $file" - rm -rf $file - done -} - - -# similar to the reset function of kubeadm. kubernetes/cmd/kubeadm/app/cmd/phases/reset/cleanupnode.go -function node_reset() { - echo "exec node_reset(1/4): stop kubelet...." - systemctl stop kubelet - - echo "exec node_reset(2/4): remove container of kubernetes...." - if [[ "$CRI_SOCKET" == *"docker"* ]]; then - docker_runtime_clean - elif [[ "$CRI_SOCKET" == *"containerd"* ]]; then - cri_runtime_clean - else - echo "Unknown runtime: $CRI_SOCKET" - exit 1 - fi - - echo "exec node_reset(3/4): unmount kubelet lib...." - # /kubernetes/cmd/kubeadm/app/cmd/phases/reset/cleanupnode.go:151 CleanDir - unmount_kubelet_directory "${PATH_KUBELET_LIB}" - - echo "exec node_reset(4/4): clean file for kubernetes...." 
- clean_dirs -} - -function unjoin() { - # before unjoin, you need to delete the node via kubectl - echo "exec(1/5): kubeadm reset...." - node_reset - if [ $? -ne 0 ]; then - exit 1 - fi - - echo "exec(2/5): restart containerd...." - systemctl restart containerd - if [ $? -ne 0 ]; then - exit 1 - fi - - echo "exec(3/5): delete cni...." - if [ -d "/etc/cni/net.d" ]; then - mv /etc/cni/net.d '/etc/cni/net.d.kosmos.back'`date +%Y_%m_%d_%H_%M_%S` - if [ $? -ne 0 ]; then - exit 1 - fi - fi - - echo "exec(4/5): delete ca.crt" - if [ -f "$PATH_KUBERNETES_PKI/ca.crt" ]; then - echo "y" | rm "$PATH_KUBERNETES_PKI/ca.crt" - if [ $? -ne 0 ]; then - exit 1 - fi - fi - - echo "exec(5/5): delete kubelet.conf" - if [ -f "$PATH_KUBELET_CONF/${KUBELET_CONFIG_NAME}" ]; then - echo "y" | rm "$PATH_KUBELET_CONF/${KUBELET_CONFIG_NAME}" - if [ $? -ne 0 ]; then - exit 1 - fi - fi -} - -function before_revert() { - if [ -f "/apps/conf/nginx/nginx.conf" ]; then - # modify hosts - config_file="/apps/conf/nginx/nginx.conf" - - server_address=$(grep -Po 'server\s+\K[^:]+(?=:6443)' "$config_file" | awk 'NR==1') - hostname=$(echo $JOIN_HOST | awk -F ":" '{print $1}') - host_record="$server_address $hostname" - if grep -qFx "$host_record" /etc/hosts; then - echo "Record $host_record already exists in /etc/hosts." - else - sed -i "1i $host_record" /etc/hosts - echo "Record $host_record inserted into /etc/hosts." - fi - fi -} - -function after_revert() { - if [ -f "/apps/conf/nginx/nginx.conf" ]; then - # modify hosts - config_file="/apps/conf/nginx/nginx.conf" - - server_address=$(grep -Po 'server\s+\K[^:]+(?=:6443)' "$config_file" | awk 'NR==1') - hostname=$(echo $JOIN_HOST | awk -F ":" '{print $1}') - host_record="$server_address $hostname" - if grep -qFx "$host_record" /etc/hosts; then - sudo sed -i "/^$host_record/d" /etc/hosts - fi - - local_record="127.0.0.1 $hostname" - if grep -qFx "$local_record" /etc/hosts; then - echo "Record $local_record already exists in /etc/hosts." - else - sed -i "1i $local_record" /etc/hosts - echo "Record $local_record inserted into /etc/hosts." - fi - - GenerateStaticNginxProxy - fi -} - -function get_ca_certificate() { - local output_file="$PATH_KUBERNETES_PKI/ca.crt" - local kubeconfig_data=$(curl -sS --insecure "https://$JOIN_HOST/api/v1/namespaces/kube-public/configmaps/cluster-info" 2>/dev/null | \ - grep -oP 'certificate-authority-data:\s*\K.*(?=server:[^[:space:]]*?)' | \ - sed -e 's/^certificate-authority-data://' -e 's/[[:space:]]//g' -e 's/\\n$//g') - - # verify the kubeconfig data is not empty - if [ -z "$kubeconfig_data" ]; then - echo "Failed to extract certificate-authority-data." - return 1 - fi - - # Base64-decode the data and write it to a file - echo "$kubeconfig_data" | base64 --decode > "$output_file" - - # check that the file was created successfully - if [ -f "$output_file" ]; then - echo "certificate-authority-data saved to $output_file" - else - echo "Failed to save certificate-authority-data to $output_file" - return 1 - fi -} - -function create_kubelet_bootstrap_config() { - # Checks if the parameters are provided - if [ -z "$JOIN_HOST" ] || [ -z "$JOIN_TOKEN" ]; then - echo "Please provide server and token as parameters." 
- return 1 - fi - - # Define file contents - cat << EOF > bootstrap-kubelet.conf -apiVersion: v1 -kind: Config -clusters: -- cluster: - certificate-authority: $PATH_KUBERNETES_PKI/ca.crt - server: https://$JOIN_HOST - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: kubelet-bootstrap - name: kubelet-bootstrap-context -current-context: kubelet-bootstrap-context -preferences: {} -users: -- name: kubelet-bootstrap - user: - token: $JOIN_TOKEN -EOF - - # copy the file to the $PATH_KUBERNETES directory - cp bootstrap-kubelet.conf $PATH_KUBERNETES - - echo "the file bootstrap-kubelet.conf has been stored in the $PATH_KUBERNETES directory." -} - -function revert() { - echo "exec(1/6): update kubeadm.cfg..." - if [ ! -f "$PATH_KUBEADM_CONFIG/kubeadm.cfg" ]; then - GenerateKubeadmConfig $JOIN_TOKEN $PATH_FILE_TMP - else - sed -e "s|token: .*$|token: $JOIN_TOKEN|g" -e "w $PATH_FILE_TMP/kubeadm.cfg.current" "$PATH_KUBEADM_CONFIG/kubeadm.cfg" - fi - - # add taints - echo "exec(2/6): update kubeadm.cfg taints..." - sed -i "/kubeletExtraArgs/a \ register-with-taints: node.kosmos.io/unschedulable:NoSchedule" "$PATH_FILE_TMP/kubeadm.cfg.current" - if [ $? -ne 0 ]; then - exit 1 - fi - - echo "exec(3/6): update kubelet-config..." - sed -e "s|__DNS_ADDRESS__|$HOST_CORE_DNS|g" -e "w ${PATH_KUBELET_CONF}/${KUBELET_CONFIG_NAME}" "$PATH_FILE_TMP"/"$KUBELET_CONFIG_NAME" - if [ $? -ne 0 ]; then - exit 1 - fi - - before_revert - if [ $? -ne 0 ]; then - exit 1 - fi - - - echo "exec(4/6): execute join cmd...." - - echo "NOTE: not using kubeadm to join node to host" - get_ca_certificate $JOIN_HOST - if [ $? -ne 0 ]; then - exit 1 - fi - create_kubelet_bootstrap_config $JOIN_HOST $JOIN_TOKEN - if [ -f "${PATH_FILE_TMP}/kubeadm-flags.env.origin" ]; then - cp "${PATH_FILE_TMP}/kubeadm-flags.env.origin" "${PATH_KUBELET_LIB}" && \ - mv "${PATH_KUBELET_LIB}/kubeadm-flags.env.origin" "${PATH_KUBELET_LIB}/kubeadm-flags.env" - else - cp "${PATH_FILE_TMP}/kubeadm-flags.env" "${PATH_KUBELET_LIB}" - fi - - echo "exec(5/6): restart containerd...." - systemctl restart containerd - if [ $? -ne 0 ]; then - exit 1 - fi - - systemctl start kubelet - elapsed_time=0 - - while [ $elapsed_time -lt $KUBELET_CONF_TIMEOUT ]; do - if [ -f "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" ]; then - rm -f "${PATH_KUBERNETES}/bootstrap-kubelet.conf" - echo "Deleted bootstrap-kubelet.conf file as kubelet.conf exists." - break - fi - sleep 2 - elapsed_time=$((elapsed_time + 2)) - done - - if [ $elapsed_time -ge $KUBELET_CONF_TIMEOUT ]; then - echo "Timeout: kubelet.conf was not generated within $KUBELET_CONF_TIMEOUT seconds. Continuing script execution." - fi - - after_revert - - if [ $? -ne 0 ]; then - exit 1 - fi - - echo "exec(6/6): revert manifests...." - if [ -d "$PATH_FILE_TMP/manifests.origin" ]; then - if [[ -n "$(ls -A ${PATH_FILE_TMP}/manifests.origin/ 2>/dev/null)" ]]; then - cp -r ${PATH_FILE_TMP}/manifests.origin/* "${PATH_KUBERNETES}/manifests/" - else - echo "No files in ${PATH_FILE_TMP}/manifests.origin" - fi - - fi - -} - -# before join, you need to upload ca.crt and kubeconfig to the tmp dir!!! -function join() { - echo "exec(1/8): stop containerd...." - systemctl stop containerd - if [ $? -ne 0 ]; then - exit 1 - fi - echo "exec(2/8): copy ca.crt...." - cp "$PATH_FILE_TMP/ca.crt" "$PATH_KUBERNETES_PKI/ca.crt" - if [ $? -ne 0 ]; then - exit 1 - fi - echo "exec(3/8): copy kubeconfig...." - cp "$PATH_FILE_TMP/$KUBELET_KUBE_CONFIG_NAME" "$PATH_KUBERNETES/$KUBELET_KUBE_CONFIG_NAME" - if [ $? 
-ne 0 ]; then - exit 1 - fi - echo "exec(4/8): set core dns address...." - if [ -n "$NODE_LOCAL_DNS_ADDRESS" ]; then - sed -e "/__DNS_ADDRESS__/i - ${NODE_LOCAL_DNS_ADDRESS}" \ - -e "s|__DNS_ADDRESS__|${DNS_ADDRESS}|g" \ - "$PATH_FILE_TMP/$KUBELET_CONFIG_NAME" \ - > "${PATH_KUBELET_CONF}/${KUBELET_CONFIG_NAME}" - else - sed -e "s|__DNS_ADDRESS__|$DNS_ADDRESS|g" -e "w ${PATH_KUBELET_CONF}/${KUBELET_CONFIG_NAME}" "$PATH_FILE_TMP"/"$KUBELET_CONFIG_NAME" - fi - if [ $? -ne 0 ]; then - exit 1 - fi - echo "exec(5/8): copy kubeadm-flags.env...." - cp "$PATH_FILE_TMP/kubeadm-flags.env" "$PATH_KUBELET_LIB/kubeadm-flags.env" - if [ $? -ne 0 ]; then - exit 1 - fi - - echo "exec(6/8): delete cni...." - if [ -d "/etc/cni/net.d" ]; then - mv /etc/cni/net.d '/etc/cni/net.d.back'`date +%Y_%m_%d_%H_%M_%S` - if [ $? -ne 0 ]; then - exit 1 - fi - fi - - echo "exec(7/8): start containerd" - systemctl start containerd - if [ $? -ne 0 ]; then - exit 1 - fi - - echo "exec(8/8): start kubelet...." - systemctl start kubelet - if [ $? -ne 0 ]; then - exit 1 - fi -} - -function health() { - result=`systemctl is-active containerd` - if [[ $result != "active" ]]; then - echo "health(1/2): containerd is inactive" - exit 1 - else - echo "health(1/2): containerd is active" - fi - - result=`systemctl is-active kubelet` - if [[ $result != "active" ]]; then - echo "health(2/2): kubelet is inactive" - exit 1 - else - echo "health(2/2): kubelet is active" - fi -} - -function log() { - systemctl status $LOG_NAME -} - -function backup_manifests() { - echo "backup_manifests(1/1): backup manifests" - if [ ! -d "$PATH_FILE_TMP/manifests.origin" ]; then - mkdir -p "$PATH_FILE_TMP/manifests.origin" - if [ $? -ne 0 ]; then - exit 1 - fi - if [[ -n "$(ls -A ${PATH_KUBERNETES}/manifests/ 2>/dev/null)" ]]; then - cp -rf ${PATH_KUBERNETES}/manifests/* ${PATH_FILE_TMP}/manifests.origin/ - else - echo "No files in ${PATH_KUBERNETES}/manifests/" - fi - fi -} - -# check the environments -function check() { - # TODO: create env file - echo "check(1/2): try to create $PATH_FILE_TMP" - if [ ! -d "$PATH_FILE_TMP" ]; then - mkdir -p "$PATH_FILE_TMP" - if [ $? -ne 0 ]; then - exit 1 - fi - fi - - echo "check(2/2): copy kubeadm-flags.env to create $PATH_FILE_TMP, remove args[cloud-provider] and taints" - # Since this function is also used when detaching nodes, we need to make sure we haven't copied kubeadm-flags.env before - if [ ! -f "${PATH_FILE_TMP}/kubeadm-flags.env.origin" ]; then - cp "${PATH_KUBELET_LIB}/kubeadm-flags.env" "${PATH_FILE_TMP}/kubeadm-flags.env.origin" - fi - sed -e "s| --cloud-provider=external | |g" -e "w ${PATH_FILE_TMP}/kubeadm-flags.env" "$PATH_KUBELET_LIB/kubeadm-flags.env" - sed -i "s| --register-with-taints=node.kosmos.io/unschedulable:NoSchedule||g" "${PATH_FILE_TMP}/kubeadm-flags.env" - if [ $? -ne 0 ]; then - exit 1 - fi - - backup_manifests - echo "environment is ok" -} - -function version() { - echo "$SCRIPT_VERSION" -} - -function is_ipv6() { - if [[ "$1" =~ : ]]; then - return 0 - else - return 1 - fi -} - -function wait_api_server_proxy_ready() { - local retries=0 - local max_retries=10 - local sleep_duration=6 - - while true; do - response=$(curl -k --connect-timeout 5 --max-time 10 https://${LOCAL_IP}:${LOCAL_PORT}/healthz) - - if [ "$response" == "ok" ]; then - echo "apiserver proxy is ready!" - return 0 - else - retries=$((retries + 1)) - echo "apiserver proxy is not ready. Retrying(${retries}/${max_retries})..." - if [ "$retries" -ge "$max_retries" ]; then - echo "Max retries reached. 
apiserver proxy did not become ready." - return 1 - fi - sleep $sleep_duration - fi - done -} - -function install_nginx_lb() { - echo "exec(1/7): get port of apiserver...." - - PORT=$(grep 'server:' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" | awk -F '[:/]' '{print $NF}') - - if [ -z "$PORT" ]; then - echo "cannot get port" - exit 1 - else - echo "port is $PORT" - fi - - if [ "$LOCAL_PORT" -eq "$PORT" ]; then - echo "Error: LOCAL_PORT ($LOCAL_PORT) cannot be the same as the backend port ($PORT)." - exit 0 - fi - - # Start generating nginx.conf - echo "exec(2/7): generate nginx.conf...." - cat <<EOL > "$PATH_FILE_TMP/nginx.conf" -error_log stderr notice; -worker_processes 1; -events { - multi_accept on; - use epoll; - worker_connections 1024; -} - -stream { - upstream kube_apiserver { - least_conn; -EOL - - # Loop through the array and append each server to the nginx.conf file - for SERVER in "${SERVERS[@]}"; do - if is_ipv6 "$SERVER"; then - echo " server [$SERVER]:$PORT;" >> "$PATH_FILE_TMP/nginx.conf" - else - echo " server $SERVER:$PORT;" >> "$PATH_FILE_TMP/nginx.conf" - fi - done - - # Continue writing the rest of the nginx.conf - cat <<EOL >> "$PATH_FILE_TMP/nginx.conf" - } - server { - listen [::]:$LOCAL_PORT; - listen 6443; - proxy_pass kube_apiserver; - proxy_timeout 10m; - proxy_connect_timeout 10s; - } -} -EOL - - echo "exec(3/7): create static pod" - GenerateStaticNginxProxy true - - - echo "exec(4/7): restart static pod" - mv "${PATH_KUBERNETES}/manifests/nginx-proxy.yaml" "${PATH_KUBERNETES}/nginx-proxy.yaml" - sleep 2 - mv "${PATH_KUBERNETES}/nginx-proxy.yaml" "${PATH_KUBERNETES}/manifests/nginx-proxy.yaml" - - echo "exec(5/7): wait nginx ready" - if wait_api_server_proxy_ready; then - echo "nginx is ready" - else - echo "nginx is not ready" - exit 1 - fi - - echo "exec(6/7): update kubelet.conf" - cp "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}.bak" - sed -i "s|server: .*|server: https://${LOCAL_IP}:${LOCAL_PORT}|" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" - - echo "exec(7/7): restart kubelet" - systemctl restart kubelet -} - -function install_lvscare_lb() { - echo "exec(1/7): get port of apiserver...." - - PORT=$(grep 'server:' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" | awk -F '[:/]' '{print $NF}') - - if [ -z "$PORT" ]; then - echo "cannot get port" - exit 1 - else - echo "port is $PORT" - fi - - # Start generating kube-lvscare.yaml - echo "exec(2/7): generate kube-lvscare.yaml...." 
- - cat <<EOL > $PATH_KUBERNETES/manifests/kube-lvscare.yaml -apiVersion: v1 -kind: Pod -metadata: - labels: - app: kube-lvscare - name: kube-lvscare - namespace: kube-system -spec: - containers: - - args: - - care - - --vs - - ${LOCAL_IP}:${LOCAL_PORT} - - --health-path - - /healthz - - --health-schem - - https -EOL - - # Loop through the array and append each server to the kube-lvscare.yaml file - for SERVER in "${SERVERS[@]}"; do - if is_ipv6 "$SERVER"; then - echo " - --rs" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml" - echo " - [$SERVER]:$PORT" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml" - else - echo " - --rs" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml" - echo " - $SERVER:$PORT" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml" - fi - done - - # Continue writing the rest of the kube-lvscare.yaml file - cat <<EOL >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml" - command: - - /usr/bin/lvscare - image: $DOCKER_IMAGE_LVSCARE - imagePullPolicy: Always - name: kube-lvscare - resources: {} - securityContext: - privileged: true - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - hostNetwork: true - volumes: - - hostPath: - path: /lib/modules - name: lib-modules -status: {} -EOL - - echo "exec(3/7): restart static pod" - mv "${PATH_KUBERNETES}/manifests/kube-lvscare.yaml" "${PATH_KUBERNETES}/kube-lvscare.yaml" - sleep 2 - mv "${PATH_KUBERNETES}/kube-lvscare.yaml" "${PATH_KUBERNETES}/manifests/kube-lvscare.yaml" - - echo "exec(4/7): wait lvscare ready" - if wait_api_server_proxy_ready; then - echo "lvscare is ready" - else - echo "lvscare is not ready" - exit 1 - fi - - echo "exec(5/7): update kubelet.conf" - cp "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}.bak" - sed -i "s|server: .*|server: https://apiserver.virtual-cluster-system.svc:${LOCAL_PORT}|" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" - - echo "exec(6/7): update /etc/hosts" - local_record="${LOCAL_IP} apiserver.virtual-cluster-system.svc" - if grep -qFx "$local_record" /etc/hosts; then - echo "Record $local_record already exists in /etc/hosts." - else - sed -i "1i $local_record" /etc/hosts - echo "Record $local_record inserted into /etc/hosts." - fi - - echo "exec(7/7): restart kubelet" - systemctl restart kubelet -} - -function install_lb() { - if [ -z "$USE_PROXY" ]; then - export USE_PROXY="LVSCARE" - fi - - if [ "$USE_PROXY" = "NGINX" ]; then - install_nginx_lb - elif [ "$USE_PROXY" = "LVSCARE" ]; then - install_lvscare_lb - else - exit 0 - fi -} - -# See how we were called. -case "$1" in - unjoin) - unjoin - ;; - install_lb) - install_lb - ;; - join) - join - ;; - health) - health - ;; - check) - check - ;; - log) - log - ;; - revert) - revert - ;; - version) - version - ;; - *) - echo $"usage: $0 unjoin|join|health|log|check|version|revert|install_lb" - exit 1 -esac \ No newline at end of file diff --git a/hack/k8s-in-k8s/node_agent_backup.sh b/hack/k8s-in-k8s/node_agent_backup.sh deleted file mode 100755 index 0c1317f71..000000000 --- a/hack/k8s-in-k8s/node_agent_backup.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -echo "(1/2) Try to back up tmp dir" -mv /apps/conf/kosmos/tmp /apps/conf/kosmos/tmp.bk -if [ ! $? -eq 0 ]; then - echo "backup tmp dir failed" - exit -fi - -echo "(2/2) Try to back up kubelet_node_helper" -mv /srv/node-agent/kubelet_node_helper.sh '/srv/node-agent/kubelet_node_helper.sh.'`date +%Y_%m_%d_%H_%M_%S` -if [ ! $? 
-eq 0 ]; then - echo "backup kubelet_node_helper.sh failed" - exit -fi - -echo "backup succeeded" \ No newline at end of file diff --git a/hack/k8s-in-k8s/port_check.sh b/hack/k8s-in-k8s/port_check.sh deleted file mode 100644 index d27cb349d..000000000 --- a/hack/k8s-in-k8s/port_check.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -function check_port { - local ip=$1 - local port=$2 - - # Check if the IP address is IPv6, then enclose it in square brackets - if [[ $ip =~ .*:.* ]]; then - ip="[$ip]" - fi - - if timeout 1 curl -s --connect-timeout 3 "${ip}:${port}" >/dev/null; then - return 0 - else - return 1 - fi -} - -nodes=$(kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="InternalIP")].address}{"\n"}{end}') - -node_array=() - -while IFS= read -r line; do - node_array+=("$line") -done <<< "$nodes" - -for node in "${node_array[@]}"; do - name=$(echo $node | awk '{print $1}') - ip=$(echo $node | awk '{print $2}') - - if check_port $ip 5678; then - echo "" - else - echo "Node: $name, IP: $ip port 5678 is not reachable" - fi -done diff --git a/hack/local-cleanup-kosmos_kubenest.sh b/hack/local-cleanup-kosmos_kubenest.sh deleted file mode 100755 index 80943b093..000000000 --- a/hack/local-cleanup-kosmos_kubenest.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -VERSION=${VERSION:-"latest"} - -function usage() { - echo "Usage:" - echo " hack/local-cleanup-kosmos_kubenest.sh [-k] [-h]" - echo "Args:" - echo " k: keep the local images" - echo " h: print help information" -} - -keep_images="false" -while getopts 'kh' OPT; do - case $OPT in - k) keep_images="true";; - h) - usage - exit 0 - ;; - ?) - usage - exit 1 - ;; - esac -done - -KUBE_NEST_CLUSTER_NAME=${KUBE_NEST_CLUSTER_NAME:-"kubenest-cluster"} - -#step1 remove kind clusters -echo -e "\nStart removing kind clusters" -kind delete cluster --name "${KUBE_NEST_CLUSTER_NAME}" -echo "Remove kind clusters successfully." - -ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -CLUSTER_DIR="${ROOT}/environments" -source "${ROOT}/hack/cluster.sh" - -#step2. remove kubeconfig -echo -e "\nStart removing kubeconfig, kindconfig, calicoconfig" -KUBE_NEST_CLUSTER_CONFIG=${KUBE_NEST_CLUSTER_CONFIG:-"${CLUSTER_DIR}/${KUBE_NEST_CLUSTER_NAME}"} -delete_cluster "${KUBE_NEST_CLUSTER_CONFIG}" "${KUBE_NEST_CLUSTER_CONFIG}" - -echo "Remove cluster configs successfully." - -#step3. remove docker images -echo -e "\nStart removing images" -registry="ghcr.io/kosmos-io" -images=( -"${registry}/virtual-cluster-operator:${VERSION}" -"${registry}/node-agent:${VERSION}" -) -if [[ "${keep_images}" == "false" ]] ; then - for ((i=0;i<${#images[*]};i++)); do - docker rmi ${images[i]} || true - done - echo "Remove images successfully." -else - echo "Skip removing images as required." -fi - -echo -e "\nLocal Kubenest is removed successfully." diff --git a/hack/local-up-kosmos_kubenest.sh b/hack/local-up-kosmos_kubenest.sh deleted file mode 100755 index b52c7b510..000000000 --- a/hack/local-up-kosmos_kubenest.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - - -function usage() { - echo "Usage:" - echo " hack/local-up-kosmos_kubenest.sh [HOST_IPADDRESS] [-h]" - echo "Args:" - echo " HOST_IPADDRESS: (required) if you want to export clusters' API server port to specific IP address" - echo " h: print help information" -} - -while getopts 'h' OPT; do - case $OPT in - h) - usage - exit 0 - ;; - ?) 
- usage - exit 1 - ;; - esac -done - - -KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"} -export KUBECONFIG=$KUBECONFIG_PATH/"config" - -KIND_IMAGE=${KIND_IMAGE:-"kindest/node:v1.27.2"} -HOST_IPADDRESS=${1:-} -KUBE_NEST_CLUSTER_NAME="kubenest-cluster" -HOST_CLUSTER_POD_CIDR="10.233.64.0/18" -HOST_CLUSTER_SERVICE_CIDR="10.233.0.0/18" - -REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -VERSION=${VERSION:-"latest"} -source "$(dirname "${BASH_SOURCE[0]}")/install_kind_kubectl.sh" -source "$(dirname "${BASH_SOURCE[0]}")/cluster.sh" -source "$(dirname "${BASH_SOURCE[0]}")/util.sh" - -#step1. create host cluster and member clusters in parallel -# host IP address: the script parameter takes precedence over the macOS IP -if [[ -z "${HOST_IPADDRESS}" ]]; then - util::get_macos_ipaddress # Adapt for macOS - HOST_IPADDRESS=${MAC_NIC_IPADDRESS:-} -fi -make images GOOS="linux" VERSION="$VERSION" --directory="${REPO_ROOT}" - -make kosmosctl -os=$(go env GOOS) -arch=$(go env GOARCH) -export PATH=$PATH:"${REPO_ROOT}"/_output/bin/"$os"/"$arch" - -# prepare docker image -prepare_docker_image - -create_cluster "${KIND_IMAGE}" "$HOST_IPADDRESS" $KUBE_NEST_CLUSTER_NAME $HOST_CLUSTER_POD_CIDR $HOST_CLUSTER_SERVICE_CIDR false true - -load_kubenetst_cluster_images $KUBE_NEST_CLUSTER_NAME diff --git a/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go b/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go deleted file mode 100644 index 24f2c8331..000000000 --- a/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go +++ /dev/null @@ -1,155 +0,0 @@ -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type Phase string - -const ( - // Preparing means kubernetes control plane is preparing, and kubeconfig is not ready - Preparing Phase = "Preparing" - // Initialized means kubernetes control plane is ready, and kubeconfig is ready for use - Initialized Phase = "Initialized" - // Completed means everything is ready, kosmos is joined, and resource is promoted - Completed Phase = "Completed" - // AllNodeReady means all nodes have joined the virtual control plane and are in the running state - AllNodeReady Phase = "AllNodeReady" - // AllNodeDeleted means all nodes have been deleted - AllNodeDeleted Phase = "AllNodeDeleted" - // Deleting means virtualcluster is being deleted - Deleting Phase = "Deleting" - // Updating means that some changes are happening - Updating Phase = "Updating" - Pending Phase = "Pending" -) - -// +genclient -// +kubebuilder:resource:scope=Namespaced,shortName=vc -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:printcolumn:name="STATUS",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="UPDATE-TIME",type=string,JSONPath=`.status.updateTime` -type VirtualCluster struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec is the specification for the behaviour of the VirtualCluster. - // +required - Spec VirtualClusterSpec `json:"spec"` - - // Status describes the current status of a VirtualCluster. 
- // +optional - Status VirtualClusterStatus `json:"status,omitempty"` -} - -type VirtualClusterSpec struct { - // Kubeconfig is the kubeconfig of the virtual kubernetes's control plane - // +optional - Kubeconfig string `json:"kubeconfig,omitempty"` - - // ExternalIP is the external ip of the virtual kubernetes's control plane - // +optional - ExternalIP string `json:"externalIP,omitempty"` - - // ExternalIps is the external ips of the virtual kubernetes's control plane - // +optional - ExternalIps []string `json:"externalIps,omitempty"` - - // KubeInKubeConfig is the external config of virtual cluster - // +optional - KubeInKubeConfig *KubeInKubeConfig `json:"kubeInKubeConfig,omitempty"` - // PromotePolicies defines the policies for promotion to the kubernetes's control plane - // +required - PromotePolicies []PromotePolicy `json:"promotePolicies,omitempty"` - - // PromoteResources defines the resources for promotion to the kubernetes's control plane, - // the resources can be nodes or just cpu, memory or gpu resources - // +optional - PromoteResources PromoteResources `json:"promoteResources,omitempty"` - - // PluginSet is the list of plugins that will be used by the virtual kubernetes's control plane - // If plugins is nil or empty, all default plugins will be used - // +optional - PluginSet PluginSet `json:"pluginSet,omitempty"` - - // datasource for plugin yaml - // +optional - PluginOptions []PluginOptions `json:"pluginOptions,omitempty"` -} - -// PluginSet specifies enabled and disabled plugins. -// If an array is empty, missing, or nil, all plugins of VirtualClusterPlugin will be used. -type PluginSet struct { - // Enabled specifies plugins that should be enabled. - // +optional - Enabled []Plugin `json:"enabled,omitempty"` - - // Disabled specifies default plugins that should be disabled. - // +optional - Disabled []Plugin `json:"disabled,omitempty"` -} - -// Plugin specifies a plugin name -type Plugin struct { - // Name defines the name of plugin - // +required - Name string `json:"name"` -} - -type PluginOptions struct { - // +required - Name string `json:"name"` - // +required - Value string `json:"value"` -} - -type PromotePolicy struct { - // LabelSelector is used to select nodes that are eligible for promotion to the kubernetes's control plane. 
- // +optional - LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` - - // NodeCount is the number of nodes to promote to the kubernetes's control plane - // +required - NodeCount int32 `json:"nodeCount"` -} - -type PromoteResources struct { - // NodeInfos is the info of nodes to promote to the kubernetes's control plane - // +optional - NodeInfos []NodeInfo `json:"nodeInfos,omitempty"` - - // Resources is the resources to promote to the kubernetes's control plane - // +optional - Resources corev1.ResourceList `json:"resources,omitempty"` -} - -type NodeInfo struct { - // NodeName defines node name - // +optional - NodeName string `json:"nodeName,omitempty"` -} - -type VirtualClusterStatus struct { - // Phase is the phase of kosmos-operator handling the VirtualCluster - // +optional - Phase Phase `json:"phase,omitempty"` - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // +optional - UpdateTime *metav1.Time `json:"updateTime,omitempty" protobuf:"bytes,7,opt,name=updateTime"` - // +optional - Port int32 `json:"port,omitempty"` - // +optional - PortMap map[string]int32 `json:"portMap,omitempty"` - // +optional - VipMap map[string]string `json:"vipMap,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type VirtualClusterList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []VirtualCluster `json:"items"` -} diff --git a/pkg/apis/kosmos/v1alpha1/virtualclusterplugin_types.go b/pkg/apis/kosmos/v1alpha1/virtualclusterplugin_types.go deleted file mode 100644 index ad8b26bde..000000000 --- a/pkg/apis/kosmos/v1alpha1/virtualclusterplugin_types.go +++ /dev/null @@ -1,81 +0,0 @@ -package v1alpha1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=vp -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type VirtualClusterPlugin struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // VirtualClusterPluginSpec is the specification for a VirtualClusterPlugin resource - // +required - Spec VirtualClusterPluginSpec `json:"spec"` -} - -type VirtualClusterPluginSpec struct { - // +optional - PluginSources PluginSources `json:"pluginSources,omitempty"` - - // +optional - SuccessStateCommand string `json:"successStateCommand,omitempty"` -} - -type PluginSources struct { - // +optional - Chart Chart `json:"chart,omitempty"` - // +optional - Yaml Yaml `json:"yaml,omitempty"` -} - -type Chart struct { - // +optional - Name string `json:"name,omitempty"` - // +optional - Repo string `json:"repo,omitempty"` - // +optional - Storage Storage `json:"storage,omitempty"` - // +optional - Version string `json:"version,omitempty"` - // +optional - ValuesFile Storage `json:"valuesFile,omitempty"` - // +optional - Values []string `json:"values,omitempty"` - // +optional - Wait bool `json:"wait,omitempty"` -} - -type Yaml struct { - // +required - Path Storage `json:"path"` -} - -type Storage struct { - // +optional - HostPath HostPath `json:"hostPath,omitempty"` - - // +optional - PVPath string `json:"pvPath,omitempty"` - - // +optional - URI string `json:"uri,omitempty"` -} - -type HostPath struct { - // +optional - Path string `json:"path,omitempty"` - - // +optional - NodeName string `json:"nodeName,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type VirtualClusterPluginList struct { 
- metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []VirtualClusterPlugin `json:"items"` -} diff --git a/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go index d10041a7c..7a9d7f2fb 100644 --- a/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go @@ -84,29 +84,6 @@ func (in *Arp) DeepCopy() *Arp { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Chart) DeepCopyInto(out *Chart) { - *out = *in - out.Storage = in.Storage - out.ValuesFile = in.ValuesFile - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Chart. -func (in *Chart) DeepCopy() *Chart { - if in == nil { - return nil - } - out := new(Chart) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in @@ -1005,22 +982,6 @@ func (in *HostAliasesConverter) DeepCopy() *HostAliasesConverter { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPath) DeepCopyInto(out *HostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPath. -func (in *HostPath) DeepCopy() *HostPath { - if in == nil { - return nil - } - out := new(HostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Iptables) DeepCopyInto(out *Iptables) { *out = *in @@ -1286,22 +1247,6 @@ func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeInfo) DeepCopyInto(out *NodeInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeInfo. -func (in *NodeInfo) DeepCopy() *NodeInfo { - if in == nil { - return nil - } - out := new(NodeInfo) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeNameConverter) DeepCopyInto(out *NodeNameConverter) { *out = *in @@ -1362,82 +1307,6 @@ func (in *NodeSelectorConverter) DeepCopy() *NodeSelectorConverter { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugin) DeepCopyInto(out *Plugin) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. -func (in *Plugin) DeepCopy() *Plugin { - if in == nil { - return nil - } - out := new(Plugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginOptions) DeepCopyInto(out *PluginOptions) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginOptions. 
-func (in *PluginOptions) DeepCopy() *PluginOptions { - if in == nil { - return nil - } - out := new(PluginOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginSet) DeepCopyInto(out *PluginSet) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]Plugin, len(*in)) - copy(*out, *in) - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]Plugin, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. -func (in *PluginSet) DeepCopy() *PluginSet { - if in == nil { - return nil - } - out := new(PluginSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginSources) DeepCopyInto(out *PluginSources) { - *out = *in - in.Chart.DeepCopyInto(&out.Chart) - out.Yaml = in.Yaml - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSources. -func (in *PluginSources) DeepCopy() *PluginSources { - if in == nil { - return nil - } - out := new(PluginSources) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodConvertPolicy) DeepCopyInto(out *PodConvertPolicy) { *out = *in @@ -1542,55 +1411,6 @@ func (in *PolicyTerm) DeepCopy() *PolicyTerm { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PromotePolicy) DeepCopyInto(out *PromotePolicy) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromotePolicy. -func (in *PromotePolicy) DeepCopy() *PromotePolicy { - if in == nil { - return nil - } - out := new(PromotePolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PromoteResources) DeepCopyInto(out *PromoteResources) { - *out = *in - if in.NodeInfos != nil { - in, out := &in.NodeInfos, &out.NodeInfos - *out = make([]NodeInfo, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromoteResources. -func (in *PromoteResources) DeepCopy() *PromoteResources { - if in == nil { - return nil - } - out := new(PromoteResources) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Proxy) DeepCopyInto(out *Proxy) { *out = *in @@ -1730,23 +1550,6 @@ func (in *ShadowDaemonSetList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Storage) DeepCopyInto(out *Storage) { - *out = *in - out.HostPath = in.HostPath - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. -func (in *Storage) DeepCopy() *Storage { - if in == nil { - return nil - } - out := new(Storage) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TenantEntrypoint) DeepCopyInto(out *TenantEntrypoint) { *out = *in @@ -1819,218 +1622,6 @@ func (in *TopologySpreadConstraintsConverter) DeepCopy() *TopologySpreadConstrai return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualCluster) DeepCopyInto(out *VirtualCluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualCluster. -func (in *VirtualCluster) DeepCopy() *VirtualCluster { - if in == nil { - return nil - } - out := new(VirtualCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VirtualCluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualClusterList) DeepCopyInto(out *VirtualClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VirtualCluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterList. -func (in *VirtualClusterList) DeepCopy() *VirtualClusterList { - if in == nil { - return nil - } - out := new(VirtualClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VirtualClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualClusterPlugin) DeepCopyInto(out *VirtualClusterPlugin) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPlugin. -func (in *VirtualClusterPlugin) DeepCopy() *VirtualClusterPlugin { - if in == nil { - return nil - } - out := new(VirtualClusterPlugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VirtualClusterPlugin) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VirtualClusterPluginList) DeepCopyInto(out *VirtualClusterPluginList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VirtualClusterPlugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPluginList. -func (in *VirtualClusterPluginList) DeepCopy() *VirtualClusterPluginList { - if in == nil { - return nil - } - out := new(VirtualClusterPluginList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VirtualClusterPluginList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualClusterPluginSpec) DeepCopyInto(out *VirtualClusterPluginSpec) { - *out = *in - in.PluginSources.DeepCopyInto(&out.PluginSources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPluginSpec. -func (in *VirtualClusterPluginSpec) DeepCopy() *VirtualClusterPluginSpec { - if in == nil { - return nil - } - out := new(VirtualClusterPluginSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualClusterSpec) DeepCopyInto(out *VirtualClusterSpec) { - *out = *in - if in.ExternalIps != nil { - in, out := &in.ExternalIps, &out.ExternalIps - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.KubeInKubeConfig != nil { - in, out := &in.KubeInKubeConfig, &out.KubeInKubeConfig - *out = new(KubeInKubeConfig) - (*in).DeepCopyInto(*out) - } - if in.PromotePolicies != nil { - in, out := &in.PromotePolicies, &out.PromotePolicies - *out = make([]PromotePolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.PromoteResources.DeepCopyInto(&out.PromoteResources) - in.PluginSet.DeepCopyInto(&out.PluginSet) - if in.PluginOptions != nil { - in, out := &in.PluginOptions, &out.PluginOptions - *out = make([]PluginOptions, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterSpec. -func (in *VirtualClusterSpec) DeepCopy() *VirtualClusterSpec { - if in == nil { - return nil - } - out := new(VirtualClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualClusterStatus) DeepCopyInto(out *VirtualClusterStatus) { - *out = *in - if in.UpdateTime != nil { - in, out := &in.UpdateTime, &out.UpdateTime - *out = (*in).DeepCopy() - } - if in.PortMap != nil { - in, out := &in.PortMap, &out.PortMap - *out = make(map[string]int32, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.VipMap != nil { - in, out := &in.VipMap, &out.VipMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterStatus. 
-func (in *VirtualClusterStatus) DeepCopy() *VirtualClusterStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(VirtualClusterStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *VxlanCIDRs) DeepCopyInto(out *VxlanCIDRs) {
 	*out = *in
@@ -2046,20 +1637,3 @@ func (in *VxlanCIDRs) DeepCopy() *VxlanCIDRs {
 	in.DeepCopyInto(out)
 	return out
 }
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Yaml) DeepCopyInto(out *Yaml) {
-	*out = *in
-	out.Path = in.Path
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Yaml.
-func (in *Yaml) DeepCopy() *Yaml {
-	if in == nil {
-		return nil
-	}
-	out := new(Yaml)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/pkg/apis/kosmos/v1alpha1/zz_generated.register.go b/pkg/apis/kosmos/v1alpha1/zz_generated.register.go
index 49a6d7652..a774db08d 100644
--- a/pkg/apis/kosmos/v1alpha1/zz_generated.register.go
+++ b/pkg/apis/kosmos/v1alpha1/zz_generated.register.go
@@ -64,10 +64,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&Proxy{},
 		&ShadowDaemonSet{},
 		&ShadowDaemonSetList{},
-		&VirtualCluster{},
-		&VirtualClusterList{},
-		&VirtualClusterPlugin{},
-		&VirtualClusterPluginList{},
 	)
 	// AddToGroupVersion allows the serialization of client types like ListOptions.
 	v1.AddToGroupVersion(scheme, SchemeGroupVersion)
diff --git a/pkg/clusterlink/controllers/nodecidr/adaper/blockwatchsyncer/blockeventhandler.go b/pkg/clusterlink/controllers/nodecidr/adaper/blockwatchsyncer/blockeventhandler.go
index b2c7a79a6..a1288b300 100644
--- a/pkg/clusterlink/controllers/nodecidr/adaper/blockwatchsyncer/blockeventhandler.go
+++ b/pkg/clusterlink/controllers/nodecidr/adaper/blockwatchsyncer/blockeventhandler.go
@@ -1,11 +1,13 @@
 package blockwatchsyncer
 
 import (
-	"github.com/kosmos.io/kosmos/pkg/utils/lifted"
+	"time"
+
 	"github.com/projectcalico/calico/libcalico-go/lib/backend/api"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
-	"time"
+
+	"github.com/kosmos.io/kosmos/pkg/utils/lifted"
 )
 
 // syncedPollPeriod controls how often you look at the status of your sync funcs
@@ -70,7 +72,7 @@ func (b *BlockEventHandler) OnUpdates(updates []api.Update) {
 }
 
 // todo put etcd's event info AsyncWorker's queue
-func (b *BlockEventHandler) onupdate(event []api.Update) {
+func (b *BlockEventHandler) onupdate(_ []api.Update) {
 
 }
diff --git a/pkg/clusterlink/controllers/nodecidr/adaper/calico_etcd.go b/pkg/clusterlink/controllers/nodecidr/adaper/calico_etcd.go
index 6157ecbc4..7dbf10e19 100644
--- a/pkg/clusterlink/controllers/nodecidr/adaper/calico_etcd.go
+++ b/pkg/clusterlink/controllers/nodecidr/adaper/calico_etcd.go
@@ -1,11 +1,12 @@
 package adaper
 
 import (
+	"github.com/projectcalico/calico/libcalico-go/lib/backend/api"
+	"k8s.io/klog/v2"
+
 	"github.com/kosmos.io/kosmos/pkg/clusterlink/controllers/nodecidr/adaper/blockwatchsyncer"
 	clusterlister "github.com/kosmos.io/kosmos/pkg/generated/listers/kosmos/v1alpha1"
 	"github.com/kosmos.io/kosmos/pkg/utils/lifted"
-	"github.com/projectcalico/calico/libcalico-go/lib/backend/api"
-	"k8s.io/klog/v2"
 )
 
 type CalicoETCDAdapter struct {
@@ -39,7 +40,7 @@ func (c *CalicoETCDAdapter) Start(stopCh <-chan struct{}) error {
 	return nil
 }
 
-func (c *CalicoETCDAdapter) GetCIDRByNodeName(nodeName string) ([]string, error) {
+func (c *CalicoETCDAdapter) GetCIDRByNodeName(_ string) ([]string, error) {
 	// see calicoctl/calicoctl/commands/datastore/migrate/migrateipam.go
 	// and libcalico-go/lib/backend/model/block_affinity.go
 	// todo use c.etcdClient to get blockaffinity in etcd
diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go
index d185900d1..a725b0a34 100644
--- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go
+++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go
@@ -52,14 +52,6 @@ func (c *FakeKosmosV1alpha1) ShadowDaemonSets(namespace string) v1alpha1.ShadowD
 	return &FakeShadowDaemonSets{c, namespace}
 }
 
-func (c *FakeKosmosV1alpha1) VirtualClusters(namespace string) v1alpha1.VirtualClusterInterface {
-	return &FakeVirtualClusters{c, namespace}
-}
-
-func (c *FakeKosmosV1alpha1) VirtualClusterPlugins(namespace string) v1alpha1.VirtualClusterPluginInterface {
-	return &FakeVirtualClusterPlugins{c, namespace}
-}
-
 // RESTClient returns a RESTClient that is used to communicate
 // with API server by this client implementation.
 func (c *FakeKosmosV1alpha1) RESTClient() rest.Interface {
diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_virtualcluster.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_virtualcluster.go
deleted file mode 100644
index eae2046e1..000000000
--- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_virtualcluster.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-	"context"
-
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	testing "k8s.io/client-go/testing"
-)
-
-// FakeVirtualClusters implements VirtualClusterInterface
-type FakeVirtualClusters struct {
-	Fake *FakeKosmosV1alpha1
-	ns   string
-}
-
-var virtualclustersResource = schema.GroupVersionResource{Group: "kosmos.io", Version: "v1alpha1", Resource: "virtualclusters"}
-
-var virtualclustersKind = schema.GroupVersionKind{Group: "kosmos.io", Version: "v1alpha1", Kind: "VirtualCluster"}
-
-// Get takes name of the virtualCluster, and returns the corresponding virtualCluster object, and an error if there is any.
-func (c *FakeVirtualClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VirtualCluster, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(virtualclustersResource, c.ns, name), &v1alpha1.VirtualCluster{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualCluster), err
-}
-
-// List takes label and field selectors, and returns the list of VirtualClusters that match those selectors.
-func (c *FakeVirtualClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VirtualClusterList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewListAction(virtualclustersResource, virtualclustersKind, c.ns, opts), &v1alpha1.VirtualClusterList{})
-
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &v1alpha1.VirtualClusterList{ListMeta: obj.(*v1alpha1.VirtualClusterList).ListMeta}
-	for _, item := range obj.(*v1alpha1.VirtualClusterList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested virtualClusters.
-func (c *FakeVirtualClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewWatchAction(virtualclustersResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a virtualCluster and creates it. Returns the server's representation of the virtualCluster, and an error, if there is any.
-func (c *FakeVirtualClusters) Create(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.CreateOptions) (result *v1alpha1.VirtualCluster, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewCreateAction(virtualclustersResource, c.ns, virtualCluster), &v1alpha1.VirtualCluster{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualCluster), err
-}
-
-// Update takes the representation of a virtualCluster and updates it. Returns the server's representation of the virtualCluster, and an error, if there is any.
-func (c *FakeVirtualClusters) Update(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.UpdateOptions) (result *v1alpha1.VirtualCluster, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewUpdateAction(virtualclustersResource, c.ns, virtualCluster), &v1alpha1.VirtualCluster{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualCluster), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeVirtualClusters) UpdateStatus(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.UpdateOptions) (*v1alpha1.VirtualCluster, error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewUpdateSubresourceAction(virtualclustersResource, "status", c.ns, virtualCluster), &v1alpha1.VirtualCluster{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualCluster), err
-}
-
-// Delete takes name of the virtualCluster and deletes it. Returns an error if one occurs.
-func (c *FakeVirtualClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(testing.NewDeleteActionWithOptions(virtualclustersResource, c.ns, name, opts), &v1alpha1.VirtualCluster{})
-
-	return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeVirtualClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	action := testing.NewDeleteCollectionAction(virtualclustersResource, c.ns, listOpts)
-
-	_, err := c.Fake.Invokes(action, &v1alpha1.VirtualClusterList{})
-	return err
-}
-
-// Patch applies the patch and returns the patched virtualCluster.
-func (c *FakeVirtualClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualCluster, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(virtualclustersResource, c.ns, name, pt, data, subresources...), &v1alpha1.VirtualCluster{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualCluster), err
-}
diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_virtualclusterplugin.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_virtualclusterplugin.go
deleted file mode 100644
index 0abda6d04..000000000
--- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_virtualclusterplugin.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-	"context"
-
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	testing "k8s.io/client-go/testing"
-)
-
-// FakeVirtualClusterPlugins implements VirtualClusterPluginInterface
-type FakeVirtualClusterPlugins struct {
-	Fake *FakeKosmosV1alpha1
-	ns   string
-}
-
-var virtualclusterpluginsResource = schema.GroupVersionResource{Group: "kosmos.io", Version: "v1alpha1", Resource: "virtualclusterplugins"}
-
-var virtualclusterpluginsKind = schema.GroupVersionKind{Group: "kosmos.io", Version: "v1alpha1", Kind: "VirtualClusterPlugin"}
-
-// Get takes name of the virtualClusterPlugin, and returns the corresponding virtualClusterPlugin object, and an error if there is any.
-func (c *FakeVirtualClusterPlugins) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(virtualclusterpluginsResource, c.ns, name), &v1alpha1.VirtualClusterPlugin{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualClusterPlugin), err
-}
-
-// List takes label and field selectors, and returns the list of VirtualClusterPlugins that match those selectors.
-func (c *FakeVirtualClusterPlugins) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VirtualClusterPluginList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewListAction(virtualclusterpluginsResource, virtualclusterpluginsKind, c.ns, opts), &v1alpha1.VirtualClusterPluginList{})
-
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &v1alpha1.VirtualClusterPluginList{ListMeta: obj.(*v1alpha1.VirtualClusterPluginList).ListMeta}
-	for _, item := range obj.(*v1alpha1.VirtualClusterPluginList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested virtualClusterPlugins.
-func (c *FakeVirtualClusterPlugins) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewWatchAction(virtualclusterpluginsResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a virtualClusterPlugin and creates it. Returns the server's representation of the virtualClusterPlugin, and an error, if there is any.
-func (c *FakeVirtualClusterPlugins) Create(ctx context.Context, virtualClusterPlugin *v1alpha1.VirtualClusterPlugin, opts v1.CreateOptions) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewCreateAction(virtualclusterpluginsResource, c.ns, virtualClusterPlugin), &v1alpha1.VirtualClusterPlugin{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualClusterPlugin), err
-}
-
-// Update takes the representation of a virtualClusterPlugin and updates it. Returns the server's representation of the virtualClusterPlugin, and an error, if there is any.
-func (c *FakeVirtualClusterPlugins) Update(ctx context.Context, virtualClusterPlugin *v1alpha1.VirtualClusterPlugin, opts v1.UpdateOptions) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewUpdateAction(virtualclusterpluginsResource, c.ns, virtualClusterPlugin), &v1alpha1.VirtualClusterPlugin{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualClusterPlugin), err
-}
-
-// Delete takes name of the virtualClusterPlugin and deletes it. Returns an error if one occurs.
-func (c *FakeVirtualClusterPlugins) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(testing.NewDeleteActionWithOptions(virtualclusterpluginsResource, c.ns, name, opts), &v1alpha1.VirtualClusterPlugin{})
-
-	return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeVirtualClusterPlugins) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	action := testing.NewDeleteCollectionAction(virtualclusterpluginsResource, c.ns, listOpts)
-
-	_, err := c.Fake.Invokes(action, &v1alpha1.VirtualClusterPluginList{})
-	return err
-}
-
-// Patch applies the patch and returns the patched virtualClusterPlugin.
-func (c *FakeVirtualClusterPlugins) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(virtualclusterpluginsResource, c.ns, name, pt, data, subresources...), &v1alpha1.VirtualClusterPlugin{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.VirtualClusterPlugin), err
-}
diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go
index e2dc929a9..bf34b2446 100644
--- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go
+++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go
@@ -21,7 +21,3 @@ type NodeConfigExpansion interface{}
 type PodConvertPolicyExpansion interface{}
 
 type ShadowDaemonSetExpansion interface{}
-
-type VirtualClusterExpansion interface{}
-
-type VirtualClusterPluginExpansion interface{}
diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go
index 9279c5377..8d4342eec 100644
--- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go
+++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go
@@ -22,8 +22,6 @@ type KosmosV1alpha1Interface interface {
 	NodeConfigsGetter
 	PodConvertPoliciesGetter
 	ShadowDaemonSetsGetter
-	VirtualClustersGetter
-	VirtualClusterPluginsGetter
 }
 
 // KosmosV1alpha1Client is used to interact with features provided by the kosmos.io group.
@@ -71,14 +69,6 @@ func (c *KosmosV1alpha1Client) ShadowDaemonSets(namespace string) ShadowDaemonSe
 	return newShadowDaemonSets(c, namespace)
 }
 
-func (c *KosmosV1alpha1Client) VirtualClusters(namespace string) VirtualClusterInterface {
-	return newVirtualClusters(c, namespace)
-}
-
-func (c *KosmosV1alpha1Client) VirtualClusterPlugins(namespace string) VirtualClusterPluginInterface {
-	return newVirtualClusterPlugins(c, namespace)
-}
-
 // NewForConfig creates a new KosmosV1alpha1Client for the given config.
 // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
 // where httpClient was generated with rest.HTTPClientFor(c).
diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/virtualcluster.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/virtualcluster.go
deleted file mode 100644
index c171d52c0..000000000
--- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/virtualcluster.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"context"
-	"time"
-
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	scheme "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned/scheme"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
-)
-
-// VirtualClustersGetter has a method to return a VirtualClusterInterface.
-// A group's client should implement this interface.
-type VirtualClustersGetter interface {
-	VirtualClusters(namespace string) VirtualClusterInterface
-}
-
-// VirtualClusterInterface has methods to work with VirtualCluster resources.
-type VirtualClusterInterface interface {
-	Create(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.CreateOptions) (*v1alpha1.VirtualCluster, error)
-	Update(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.UpdateOptions) (*v1alpha1.VirtualCluster, error)
-	UpdateStatus(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.UpdateOptions) (*v1alpha1.VirtualCluster, error)
-	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
-	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VirtualCluster, error)
-	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VirtualClusterList, error)
-	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualCluster, err error)
-	VirtualClusterExpansion
-}
-
-// virtualClusters implements VirtualClusterInterface
-type virtualClusters struct {
-	client rest.Interface
-	ns     string
-}
-
-// newVirtualClusters returns a VirtualClusters
-func newVirtualClusters(c *KosmosV1alpha1Client, namespace string) *virtualClusters {
-	return &virtualClusters{
-		client: c.RESTClient(),
-		ns:     namespace,
-	}
-}
-
-// Get takes name of the virtualCluster, and returns the corresponding virtualCluster object, and an error if there is any.
-func (c *virtualClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VirtualCluster, err error) {
-	result = &v1alpha1.VirtualCluster{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of VirtualClusters that match those selectors.
-func (c *virtualClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VirtualClusterList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1alpha1.VirtualClusterList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested virtualClusters.
-func (c *virtualClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a virtualCluster and creates it. Returns the server's representation of the virtualCluster, and an error, if there is any.
-func (c *virtualClusters) Create(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.CreateOptions) (result *v1alpha1.VirtualCluster, err error) {
-	result = &v1alpha1.VirtualCluster{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(virtualCluster).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a virtualCluster and updates it. Returns the server's representation of the virtualCluster, and an error, if there is any.
-func (c *virtualClusters) Update(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.UpdateOptions) (result *v1alpha1.VirtualCluster, err error) {
-	result = &v1alpha1.VirtualCluster{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		Name(virtualCluster.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(virtualCluster).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *virtualClusters) UpdateStatus(ctx context.Context, virtualCluster *v1alpha1.VirtualCluster, opts v1.UpdateOptions) (result *v1alpha1.VirtualCluster, err error) {
-	result = &v1alpha1.VirtualCluster{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		Name(virtualCluster.Name).
-		SubResource("status").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(virtualCluster).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the virtualCluster and deletes it. Returns an error if one occurs.
-func (c *virtualClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *virtualClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched virtualCluster.
-func (c *virtualClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualCluster, err error) {
-	result = &v1alpha1.VirtualCluster{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("virtualclusters").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/virtualclusterplugin.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/virtualclusterplugin.go
deleted file mode 100644
index 4a9bc73ee..000000000
--- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/virtualclusterplugin.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"context"
-	"time"
-
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	scheme "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned/scheme"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
-)
-
-// VirtualClusterPluginsGetter has a method to return a VirtualClusterPluginInterface.
-// A group's client should implement this interface.
-type VirtualClusterPluginsGetter interface {
-	VirtualClusterPlugins(namespace string) VirtualClusterPluginInterface
-}
-
-// VirtualClusterPluginInterface has methods to work with VirtualClusterPlugin resources.
-type VirtualClusterPluginInterface interface {
-	Create(ctx context.Context, virtualClusterPlugin *v1alpha1.VirtualClusterPlugin, opts v1.CreateOptions) (*v1alpha1.VirtualClusterPlugin, error)
-	Update(ctx context.Context, virtualClusterPlugin *v1alpha1.VirtualClusterPlugin, opts v1.UpdateOptions) (*v1alpha1.VirtualClusterPlugin, error)
-	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
-	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VirtualClusterPlugin, error)
-	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VirtualClusterPluginList, error)
-	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualClusterPlugin, err error)
-	VirtualClusterPluginExpansion
-}
-
-// virtualClusterPlugins implements VirtualClusterPluginInterface
-type virtualClusterPlugins struct {
-	client rest.Interface
-	ns     string
-}
-
-// newVirtualClusterPlugins returns a VirtualClusterPlugins
-func newVirtualClusterPlugins(c *KosmosV1alpha1Client, namespace string) *virtualClusterPlugins {
-	return &virtualClusterPlugins{
-		client: c.RESTClient(),
-		ns:     namespace,
-	}
-}
-
-// Get takes name of the virtualClusterPlugin, and returns the corresponding virtualClusterPlugin object, and an error if there is any.
-func (c *virtualClusterPlugins) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	result = &v1alpha1.VirtualClusterPlugin{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of VirtualClusterPlugins that match those selectors.
-func (c *virtualClusterPlugins) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VirtualClusterPluginList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1alpha1.VirtualClusterPluginList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested virtualClusterPlugins.
-func (c *virtualClusterPlugins) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a virtualClusterPlugin and creates it. Returns the server's representation of the virtualClusterPlugin, and an error, if there is any.
-func (c *virtualClusterPlugins) Create(ctx context.Context, virtualClusterPlugin *v1alpha1.VirtualClusterPlugin, opts v1.CreateOptions) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	result = &v1alpha1.VirtualClusterPlugin{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(virtualClusterPlugin).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a virtualClusterPlugin and updates it. Returns the server's representation of the virtualClusterPlugin, and an error, if there is any.
-func (c *virtualClusterPlugins) Update(ctx context.Context, virtualClusterPlugin *v1alpha1.VirtualClusterPlugin, opts v1.UpdateOptions) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	result = &v1alpha1.VirtualClusterPlugin{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		Name(virtualClusterPlugin.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(virtualClusterPlugin).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the virtualClusterPlugin and deletes it. Returns an error if one occurs.
-func (c *virtualClusterPlugins) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *virtualClusterPlugins) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched virtualClusterPlugin.
-func (c *virtualClusterPlugins) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualClusterPlugin, err error) {
-	result = &v1alpha1.VirtualClusterPlugin{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("virtualclusterplugins").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go
index cde210941..19d68b183 100644
--- a/pkg/generated/informers/externalversions/generic.go
+++ b/pkg/generated/informers/externalversions/generic.go
@@ -58,10 +58,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().PodConvertPolicies().Informer()}, nil
 	case v1alpha1.SchemeGroupVersion.WithResource("shadowdaemonsets"):
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().ShadowDaemonSets().Informer()}, nil
-	case v1alpha1.SchemeGroupVersion.WithResource("virtualclusters"):
-		return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().VirtualClusters().Informer()}, nil
-	case v1alpha1.SchemeGroupVersion.WithResource("virtualclusterplugins"):
-		return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().VirtualClusterPlugins().Informer()}, nil
 
 		// Group=multicluster.x-k8s.io, Version=v1alpha1
 	case apisv1alpha1.SchemeGroupVersion.WithResource("serviceexports"):
diff --git a/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go b/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go
index 95629ea6f..37a9f8117 100644
--- a/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go
+++ b/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go
@@ -28,10 +28,6 @@ type Interface interface {
 	PodConvertPolicies() PodConvertPolicyInformer
 	// ShadowDaemonSets returns a ShadowDaemonSetInformer.
 	ShadowDaemonSets() ShadowDaemonSetInformer
-	// VirtualClusters returns a VirtualClusterInformer.
-	VirtualClusters() VirtualClusterInformer
-	// VirtualClusterPlugins returns a VirtualClusterPluginInformer.
-	VirtualClusterPlugins() VirtualClusterPluginInformer
 }
 
 type version struct {
@@ -94,13 +90,3 @@ func (v *version) PodConvertPolicies() PodConvertPolicyInformer {
 func (v *version) ShadowDaemonSets() ShadowDaemonSetInformer {
 	return &shadowDaemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
 }
-
-// VirtualClusters returns a VirtualClusterInformer.
-func (v *version) VirtualClusters() VirtualClusterInformer {
-	return &virtualClusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
-
-// VirtualClusterPlugins returns a VirtualClusterPluginInformer.
-func (v *version) VirtualClusterPlugins() VirtualClusterPluginInformer {
-	return &virtualClusterPluginInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
diff --git a/pkg/generated/informers/externalversions/kosmos/v1alpha1/virtualcluster.go b/pkg/generated/informers/externalversions/kosmos/v1alpha1/virtualcluster.go
deleted file mode 100644
index b8b39c018..000000000
--- a/pkg/generated/informers/externalversions/kosmos/v1alpha1/virtualcluster.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"context"
-	time "time"
-
-	kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	versioned "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
-	internalinterfaces "github.com/kosmos.io/kosmos/pkg/generated/informers/externalversions/internalinterfaces"
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/generated/listers/kosmos/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	watch "k8s.io/apimachinery/pkg/watch"
-	cache "k8s.io/client-go/tools/cache"
-)
-
-// VirtualClusterInformer provides access to a shared informer and lister for
-// VirtualClusters.
-type VirtualClusterInformer interface {
-	Informer() cache.SharedIndexInformer
-	Lister() v1alpha1.VirtualClusterLister
-}
-
-type virtualClusterInformer struct {
-	factory          internalinterfaces.SharedInformerFactory
-	tweakListOptions internalinterfaces.TweakListOptionsFunc
-	namespace        string
-}
-
-// NewVirtualClusterInformer constructs a new informer for VirtualCluster type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewVirtualClusterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
-	return NewFilteredVirtualClusterInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredVirtualClusterInformer constructs a new informer for VirtualCluster type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredVirtualClusterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(
-		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				if tweakListOptions != nil {
-					tweakListOptions(&options)
-				}
-				return client.KosmosV1alpha1().VirtualClusters(namespace).List(context.TODO(), options)
-			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				if tweakListOptions != nil {
-					tweakListOptions(&options)
-				}
-				return client.KosmosV1alpha1().VirtualClusters(namespace).Watch(context.TODO(), options)
-			},
-		},
-		&kosmosv1alpha1.VirtualCluster{},
-		resyncPeriod,
-		indexers,
-	)
-}
-
-func (f *virtualClusterInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-	return NewFilteredVirtualClusterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *virtualClusterInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.InformerFor(&kosmosv1alpha1.VirtualCluster{}, f.defaultInformer)
-}
-
-func (f *virtualClusterInformer) Lister() v1alpha1.VirtualClusterLister {
-	return v1alpha1.NewVirtualClusterLister(f.Informer().GetIndexer())
-}
diff --git a/pkg/generated/informers/externalversions/kosmos/v1alpha1/virtualclusterplugin.go b/pkg/generated/informers/externalversions/kosmos/v1alpha1/virtualclusterplugin.go
deleted file mode 100644
index 868116b5e..000000000
--- a/pkg/generated/informers/externalversions/kosmos/v1alpha1/virtualclusterplugin.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"context"
-	time "time"
-
-	kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	versioned "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
-	internalinterfaces "github.com/kosmos.io/kosmos/pkg/generated/informers/externalversions/internalinterfaces"
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/generated/listers/kosmos/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	watch "k8s.io/apimachinery/pkg/watch"
-	cache "k8s.io/client-go/tools/cache"
-)
-
-// VirtualClusterPluginInformer provides access to a shared informer and lister for
-// VirtualClusterPlugins.
-type VirtualClusterPluginInformer interface {
-	Informer() cache.SharedIndexInformer
-	Lister() v1alpha1.VirtualClusterPluginLister
-}
-
-type virtualClusterPluginInformer struct {
-	factory          internalinterfaces.SharedInformerFactory
-	tweakListOptions internalinterfaces.TweakListOptionsFunc
-	namespace        string
-}
-
-// NewVirtualClusterPluginInformer constructs a new informer for VirtualClusterPlugin type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewVirtualClusterPluginInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
-	return NewFilteredVirtualClusterPluginInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredVirtualClusterPluginInformer constructs a new informer for VirtualClusterPlugin type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredVirtualClusterPluginInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(
-		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				if tweakListOptions != nil {
-					tweakListOptions(&options)
-				}
-				return client.KosmosV1alpha1().VirtualClusterPlugins(namespace).List(context.TODO(), options)
-			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				if tweakListOptions != nil {
-					tweakListOptions(&options)
-				}
-				return client.KosmosV1alpha1().VirtualClusterPlugins(namespace).Watch(context.TODO(), options)
-			},
-		},
-		&kosmosv1alpha1.VirtualClusterPlugin{},
-		resyncPeriod,
-		indexers,
-	)
-}
-
-func (f *virtualClusterPluginInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-	return NewFilteredVirtualClusterPluginInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *virtualClusterPluginInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.InformerFor(&kosmosv1alpha1.VirtualClusterPlugin{}, f.defaultInformer)
-}
-
-func (f *virtualClusterPluginInformer) Lister() v1alpha1.VirtualClusterPluginLister {
-	return v1alpha1.NewVirtualClusterPluginLister(f.Informer().GetIndexer())
-}
diff --git a/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go b/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go
index 518df1adc..bf2c382bb 100644
--- a/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go
+++ b/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go
@@ -57,19 +57,3 @@ type ShadowDaemonSetListerExpansion interface{}
 
 // ShadowDaemonSetNamespaceListerExpansion allows custom methods to be added to
 // ShadowDaemonSetNamespaceLister.
 type ShadowDaemonSetNamespaceListerExpansion interface{}
-
-// VirtualClusterListerExpansion allows custom methods to be added to
-// VirtualClusterLister.
-type VirtualClusterListerExpansion interface{}
-
-// VirtualClusterNamespaceListerExpansion allows custom methods to be added to
-// VirtualClusterNamespaceLister.
-type VirtualClusterNamespaceListerExpansion interface{}
-
-// VirtualClusterPluginListerExpansion allows custom methods to be added to
-// VirtualClusterPluginLister.
-type VirtualClusterPluginListerExpansion interface{}
-
-// VirtualClusterPluginNamespaceListerExpansion allows custom methods to be added to
-// VirtualClusterPluginNamespaceLister.
-type VirtualClusterPluginNamespaceListerExpansion interface{}
diff --git a/pkg/generated/listers/kosmos/v1alpha1/virtualcluster.go b/pkg/generated/listers/kosmos/v1alpha1/virtualcluster.go
deleted file mode 100644
index f677a5bcd..000000000
--- a/pkg/generated/listers/kosmos/v1alpha1/virtualcluster.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/client-go/tools/cache"
-)
-
-// VirtualClusterLister helps list VirtualClusters.
-// All objects returned here must be treated as read-only.
-type VirtualClusterLister interface {
-	// List lists all VirtualClusters in the indexer.
-	// Objects returned here must be treated as read-only.
-	List(selector labels.Selector) (ret []*v1alpha1.VirtualCluster, err error)
-	// VirtualClusters returns an object that can list and get VirtualClusters.
-	VirtualClusters(namespace string) VirtualClusterNamespaceLister
-	VirtualClusterListerExpansion
-}
-
-// virtualClusterLister implements the VirtualClusterLister interface.
-type virtualClusterLister struct {
-	indexer cache.Indexer
-}
-
-// NewVirtualClusterLister returns a new VirtualClusterLister.
-func NewVirtualClusterLister(indexer cache.Indexer) VirtualClusterLister {
-	return &virtualClusterLister{indexer: indexer}
-}
-
-// List lists all VirtualClusters in the indexer.
-func (s *virtualClusterLister) List(selector labels.Selector) (ret []*v1alpha1.VirtualCluster, err error) {
-	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1alpha1.VirtualCluster))
-	})
-	return ret, err
-}
-
-// VirtualClusters returns an object that can list and get VirtualClusters.
-func (s *virtualClusterLister) VirtualClusters(namespace string) VirtualClusterNamespaceLister {
-	return virtualClusterNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// VirtualClusterNamespaceLister helps list and get VirtualClusters.
-// All objects returned here must be treated as read-only.
-type VirtualClusterNamespaceLister interface {
-	// List lists all VirtualClusters in the indexer for a given namespace.
-	// Objects returned here must be treated as read-only.
-	List(selector labels.Selector) (ret []*v1alpha1.VirtualCluster, err error)
-	// Get retrieves the VirtualCluster from the indexer for a given namespace and name.
-	// Objects returned here must be treated as read-only.
-	Get(name string) (*v1alpha1.VirtualCluster, error)
-	VirtualClusterNamespaceListerExpansion
-}
-
-// virtualClusterNamespaceLister implements the VirtualClusterNamespaceLister
-// interface.
-type virtualClusterNamespaceLister struct {
-	indexer   cache.Indexer
-	namespace string
-}
-
-// List lists all VirtualClusters in the indexer for a given namespace.
-func (s virtualClusterNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.VirtualCluster, err error) {
-	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1alpha1.VirtualCluster))
-	})
-	return ret, err
-}
-
-// Get retrieves the VirtualCluster from the indexer for a given namespace and name.
-func (s virtualClusterNamespaceLister) Get(name string) (*v1alpha1.VirtualCluster, error) {
-	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(v1alpha1.Resource("virtualcluster"), name)
-	}
-	return obj.(*v1alpha1.VirtualCluster), nil
-}
diff --git a/pkg/generated/listers/kosmos/v1alpha1/virtualclusterplugin.go b/pkg/generated/listers/kosmos/v1alpha1/virtualclusterplugin.go
deleted file mode 100644
index 76605ee4b..000000000
--- a/pkg/generated/listers/kosmos/v1alpha1/virtualclusterplugin.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/client-go/tools/cache"
-)
-
-// VirtualClusterPluginLister helps list VirtualClusterPlugins.
-// All objects returned here must be treated as read-only.
-type VirtualClusterPluginLister interface {
-	// List lists all VirtualClusterPlugins in the indexer.
-	// Objects returned here must be treated as read-only.
-	List(selector labels.Selector) (ret []*v1alpha1.VirtualClusterPlugin, err error)
-	// VirtualClusterPlugins returns an object that can list and get VirtualClusterPlugins.
-	VirtualClusterPlugins(namespace string) VirtualClusterPluginNamespaceLister
-	VirtualClusterPluginListerExpansion
-}
-
-// virtualClusterPluginLister implements the VirtualClusterPluginLister interface.
-type virtualClusterPluginLister struct {
-	indexer cache.Indexer
-}
-
-// NewVirtualClusterPluginLister returns a new VirtualClusterPluginLister.
-func NewVirtualClusterPluginLister(indexer cache.Indexer) VirtualClusterPluginLister {
-	return &virtualClusterPluginLister{indexer: indexer}
-}
-
-// List lists all VirtualClusterPlugins in the indexer.
-func (s *virtualClusterPluginLister) List(selector labels.Selector) (ret []*v1alpha1.VirtualClusterPlugin, err error) {
-	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1alpha1.VirtualClusterPlugin))
-	})
-	return ret, err
-}
-
-// VirtualClusterPlugins returns an object that can list and get VirtualClusterPlugins.
-func (s *virtualClusterPluginLister) VirtualClusterPlugins(namespace string) VirtualClusterPluginNamespaceLister {
-	return virtualClusterPluginNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// VirtualClusterPluginNamespaceLister helps list and get VirtualClusterPlugins.
-// All objects returned here must be treated as read-only.
-type VirtualClusterPluginNamespaceLister interface {
-	// List lists all VirtualClusterPlugins in the indexer for a given namespace.
-	// Objects returned here must be treated as read-only.
-	List(selector labels.Selector) (ret []*v1alpha1.VirtualClusterPlugin, err error)
-	// Get retrieves the VirtualClusterPlugin from the indexer for a given namespace and name.
-	// Objects returned here must be treated as read-only.
-	Get(name string) (*v1alpha1.VirtualClusterPlugin, error)
-	VirtualClusterPluginNamespaceListerExpansion
-}
-
-// virtualClusterPluginNamespaceLister implements the VirtualClusterPluginNamespaceLister
-// interface.
-type virtualClusterPluginNamespaceLister struct {
-	indexer   cache.Indexer
-	namespace string
-}
-
-// List lists all VirtualClusterPlugins in the indexer for a given namespace.
-func (s virtualClusterPluginNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.VirtualClusterPlugin, err error) {
-	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1alpha1.VirtualClusterPlugin))
-	})
-	return ret, err
-}
-
-// Get retrieves the VirtualClusterPlugin from the indexer for a given namespace and name.
-func (s virtualClusterPluginNamespaceLister) Get(name string) (*v1alpha1.VirtualClusterPlugin, error) {
-	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(v1alpha1.Resource("virtualclusterplugin"), name)
-	}
-	return obj.(*v1alpha1.VirtualClusterPlugin), nil
-}
diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go
index 4d2befe7f..16bb7a7de 100644
--- a/pkg/generated/openapi/zz_generated.openapi.go
+++ b/pkg/generated/openapi/zz_generated.openapi.go
@@ -18,7 +18,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.AdvancedTerm": schema_pkg_apis_kosmos_v1alpha1_AdvancedTerm(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.AffinityConverter": schema_pkg_apis_kosmos_v1alpha1_AffinityConverter(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Arp": schema_pkg_apis_kosmos_v1alpha1_Arp(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Chart": schema_pkg_apis_kosmos_v1alpha1_Chart(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Cluster": schema_pkg_apis_kosmos_v1alpha1_Cluster(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.ClusterDistributionPolicy": schema_pkg_apis_kosmos_v1alpha1_ClusterDistributionPolicy(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.ClusterDistributionPolicyList": schema_pkg_apis_kosmos_v1alpha1_ClusterDistributionPolicyList(ref),
@@ -52,7 +51,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.GlobalNodeSpec": schema_pkg_apis_kosmos_v1alpha1_GlobalNodeSpec(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.GlobalNodeStatus": schema_pkg_apis_kosmos_v1alpha1_GlobalNodeStatus(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.HostAliasesConverter": schema_pkg_apis_kosmos_v1alpha1_HostAliasesConverter(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.HostPath": schema_pkg_apis_kosmos_v1alpha1_HostPath(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Iptables": schema_pkg_apis_kosmos_v1alpha1_Iptables(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.KosmosKubeConfig": schema_pkg_apis_kosmos_v1alpha1_KosmosKubeConfig(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.KubeInKubeConfig": schema_pkg_apis_kosmos_v1alpha1_KubeInKubeConfig(ref),
@@ -64,39 +62,23 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeConfigList": schema_pkg_apis_kosmos_v1alpha1_NodeConfigList(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeConfigSpec": schema_pkg_apis_kosmos_v1alpha1_NodeConfigSpec(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeConfigStatus": schema_pkg_apis_kosmos_v1alpha1_NodeConfigStatus(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeInfo": schema_pkg_apis_kosmos_v1alpha1_NodeInfo(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeNameConverter": schema_pkg_apis_kosmos_v1alpha1_NodeNameConverter(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeSelector": schema_pkg_apis_kosmos_v1alpha1_NodeSelector(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeSelectorConverter": schema_pkg_apis_kosmos_v1alpha1_NodeSelectorConverter(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Plugin": schema_pkg_apis_kosmos_v1alpha1_Plugin(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginOptions": schema_pkg_apis_kosmos_v1alpha1_PluginOptions(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginSet": schema_pkg_apis_kosmos_v1alpha1_PluginSet(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginSources": schema_pkg_apis_kosmos_v1alpha1_PluginSources(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PodConvertPolicy": schema_pkg_apis_kosmos_v1alpha1_PodConvertPolicy(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PodConvertPolicyList": schema_pkg_apis_kosmos_v1alpha1_PodConvertPolicyList(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PodConvertPolicySpec": schema_pkg_apis_kosmos_v1alpha1_PodConvertPolicySpec(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PolicyTerm": schema_pkg_apis_kosmos_v1alpha1_PolicyTerm(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicy": schema_pkg_apis_kosmos_v1alpha1_PromotePolicy(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromoteResources": schema_pkg_apis_kosmos_v1alpha1_PromoteResources(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Proxy": schema_pkg_apis_kosmos_v1alpha1_Proxy(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.ResourceSelector": schema_pkg_apis_kosmos_v1alpha1_ResourceSelector(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Route": schema_pkg_apis_kosmos_v1alpha1_Route(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.SchedulerNameConverter": schema_pkg_apis_kosmos_v1alpha1_SchedulerNameConverter(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.ShadowDaemonSet": schema_pkg_apis_kosmos_v1alpha1_ShadowDaemonSet(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.ShadowDaemonSetList": schema_pkg_apis_kosmos_v1alpha1_ShadowDaemonSetList(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Storage": schema_pkg_apis_kosmos_v1alpha1_Storage(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.TenantEntrypoint": schema_pkg_apis_kosmos_v1alpha1_TenantEntrypoint(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.TolerationConverter": schema_pkg_apis_kosmos_v1alpha1_TolerationConverter(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.TopologySpreadConstraintsConverter": schema_pkg_apis_kosmos_v1alpha1_TopologySpreadConstraintsConverter(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualCluster": schema_pkg_apis_kosmos_v1alpha1_VirtualCluster(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterList": schema_pkg_apis_kosmos_v1alpha1_VirtualClusterList(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterPlugin": schema_pkg_apis_kosmos_v1alpha1_VirtualClusterPlugin(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterPluginList": schema_pkg_apis_kosmos_v1alpha1_VirtualClusterPluginList(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterPluginSpec": schema_pkg_apis_kosmos_v1alpha1_VirtualClusterPluginSpec(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterSpec": schema_pkg_apis_kosmos_v1alpha1_VirtualClusterSpec(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterStatus": schema_pkg_apis_kosmos_v1alpha1_VirtualClusterStatus(ref),
 		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VxlanCIDRs": schema_pkg_apis_kosmos_v1alpha1_VxlanCIDRs(ref),
-		"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Yaml": schema_pkg_apis_kosmos_v1alpha1_Yaml(ref),
 		"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref),
 		"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref),
 		"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref),
@@ -270,70 +252,6 @@ func schema_pkg_apis_kosmos_v1alpha1_Arp(ref common.ReferenceCallback) common.Op
 	}
 }
 
-func schema_pkg_apis_kosmos_v1alpha1_Chart(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"name": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"string"},
-							Format: "",
-						},
-					},
-					"repo": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"string"},
-							Format: "",
-						},
-					},
-					"storage": {
-						SchemaProps: spec.SchemaProps{
-							Default: map[string]interface{}{},
-							Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Storage"),
-						},
-					},
-					"version": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"string"},
-							Format: "",
-						},
-					},
-					"valuesFile": {
-						SchemaProps: spec.SchemaProps{
-							Default: map[string]interface{}{},
-							Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Storage"),
-						},
-					},
-					"values": {
-						SchemaProps: spec.SchemaProps{
-							Type: []string{"array"},
-							Items: &spec.SchemaOrArray{
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Default: "",
-										Type:    []string{"string"},
-										Format:  "",
-									},
-								},
-							},
-						},
-					},
-					"wait": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"boolean"},
-							Format: "",
-						},
-					},
-				},
-			},
-		},
-		Dependencies: []string{
-			"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Storage"},
-	}
-}
-
 func schema_pkg_apis_kosmos_v1alpha1_Cluster(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -1844,30 +1762,6 @@ func schema_pkg_apis_kosmos_v1alpha1_HostAliasesConverter(ref common.ReferenceCa
 	}
 }
 
-func schema_pkg_apis_kosmos_v1alpha1_HostPath(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"path": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"string"},
-							Format: "",
-						},
-					},
-					"nodeName": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"string"},
-							Format: "",
-						},
-					},
-				},
-			},
-		},
-	}
-}
-
 func schema_pkg_apis_kosmos_v1alpha1_Iptables(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -2375,25 +2269,6 @@ func schema_pkg_apis_kosmos_v1alpha1_NodeConfigStatus(ref common.ReferenceCallba
 	}
 }
 
-func schema_pkg_apis_kosmos_v1alpha1_NodeInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"nodeName": {
-						SchemaProps: spec.SchemaProps{
-							Description: "NodeName defines node name",
-							Type:        []string{"string"},
-							Format:      "",
-						},
-					},
-				},
-			},
-		},
-	}
-}
-
 func schema_pkg_apis_kosmos_v1alpha1_NodeNameConverter(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -2484,124 +2359,6 @@ func schema_pkg_apis_kosmos_v1alpha1_NodeSelectorConverter(ref common.ReferenceC
 	}
 }
 
-func schema_pkg_apis_kosmos_v1alpha1_Plugin(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Description: "Plugin specifies a plugin name",
-				Type:        []string{"object"},
-				Properties: map[string]spec.Schema{
-					"name": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Name defines the name of plugin",
-							Default:     "",
-							Type:        []string{"string"},
-							Format:      "",
-						},
-					},
-				},
-				Required: []string{"name"},
-			},
-		},
-	}
-}
-
-func schema_pkg_apis_kosmos_v1alpha1_PluginOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"name": {
-						SchemaProps: spec.SchemaProps{
-							Default: "",
-							Type:    []string{"string"},
-							Format:  "",
-						},
-					},
-					"value": {
-						SchemaProps: spec.SchemaProps{
-							Default: "",
-							Type:    []string{"string"},
-							Format:  "",
-						},
-					},
-				},
-				Required: []string{"name", "value"},
-			},
-		},
-	}
-}
-
-func schema_pkg_apis_kosmos_v1alpha1_PluginSet(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Description: "PluginSet specifies enabled and disabled plugins . If an array is empty, missing, or nil, all plugins of VirtualClusterPlugin will be used.",
-				Type:        []string{"object"},
-				Properties: map[string]spec.Schema{
-					"enabled": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Enabled specifies plugins that should be enabled .",
-							Type:        []string{"array"},
-							Items: &spec.SchemaOrArray{
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Default: map[string]interface{}{},
-										Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Plugin"),
-									},
-								},
-							},
-						},
-					},
-					"disabled": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Disabled specifies default plugins that should be disabled.",
-							Type:        []string{"array"},
-							Items: &spec.SchemaOrArray{
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Default: map[string]interface{}{},
-										Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Plugin"),
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-		Dependencies: []string{
-			"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Plugin"},
-	}
-}
-
-func schema_pkg_apis_kosmos_v1alpha1_PluginSources(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"chart": {
-						SchemaProps: spec.SchemaProps{
-							Default: map[string]interface{}{},
-							Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Chart"),
-						},
-					},
-					"yaml": {
-						SchemaProps: spec.SchemaProps{
-							Default: map[string]interface{}{},
-							Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Yaml"),
-						},
-					},
-				},
-			},
-		},
-		Dependencies: []string{
-			"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Chart", "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Yaml"},
-	}
-}
-
 func schema_pkg_apis_kosmos_v1alpha1_PodConvertPolicy(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -2762,78 +2519,6 @@ func schema_pkg_apis_kosmos_v1alpha1_PolicyTerm(ref common.ReferenceCallback) co
 	}
 }
 
-func schema_pkg_apis_kosmos_v1alpha1_PromotePolicy(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"labelSelector": {
-						SchemaProps: spec.SchemaProps{
-							Description: "LabelSelector is used to select nodes that are eligible for promotion to the kubernetes's control plane.",
-							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
-						},
-					},
-					"nodeCount": {
-						SchemaProps: spec.SchemaProps{
-							Description: "NodeCount is the number of nodes to promote to the kubernetes's control plane",
-							Default:     0,
-							Type:        []string{"integer"},
-							Format:      "int32",
-						},
-					},
-				},
-				Required: []string{"nodeCount"},
-			},
-		},
-		Dependencies: []string{
-			"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
-	}
-}
-
-func schema_pkg_apis_kosmos_v1alpha1_PromoteResources(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"nodeInfos": {
-						SchemaProps: spec.SchemaProps{
-							Description: "NodeInfos is the info of nodes to promote to the kubernetes's control plane",
-							Type:        []string{"array"},
-							Items: &spec.SchemaOrArray{
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Default: map[string]interface{}{},
-										Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeInfo"),
-									},
-								},
-							},
-						},
-					},
-					"resources": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Resources is the resources to promote to the kubernetes's control plane",
-							Type:        []string{"object"},
-							AdditionalProperties: &spec.SchemaOrBool{
-								Allows: true,
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Default: map[string]interface{}{},
-										Ref:     ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-		Dependencies: []string{
-			"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.NodeInfo", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
-	}
-}
-
 func schema_pkg_apis_kosmos_v1alpha1_Proxy(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -3064,44 +2749,12 @@ func schema_pkg_apis_kosmos_v1alpha1_ShadowDaemonSetList(ref common.ReferenceCal
 	}
 }
 
-func schema_pkg_apis_kosmos_v1alpha1_Storage(ref common.ReferenceCallback) common.OpenAPIDefinition {
+func schema_pkg_apis_kosmos_v1alpha1_TenantEntrypoint(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
 			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"hostPath": {
-						SchemaProps: spec.SchemaProps{
-							Default: map[string]interface{}{},
-							Ref:     ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.HostPath"),
-						},
-					},
-					"pvPath": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"string"},
-							Format: "",
-						},
-					},
-					"uri": {
-						SchemaProps: spec.SchemaProps{
-							Type:   []string{"string"},
-							Format: "",
-						},
-					},
-				},
-			},
-		},
-		Dependencies: []string{
-			"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.HostPath"},
-	}
-}
-
-func schema_pkg_apis_kosmos_v1alpha1_TenantEntrypoint(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Description: "TenantEntrypoint contains the configuration for the tenant entrypoint.",
-				Type:        []string{"object"},
+				Description: "TenantEntrypoint contains the configuration for the tenant entrypoint.",
+				Type:        []string{"object"},
 				Properties: map[string]spec.Schema{
 					"externalIps": {
 						SchemaProps: spec.SchemaProps{
@@ -3212,378 +2865,6 @@ func schema_pkg_apis_kosmos_v1alpha1_TopologySpreadConstraintsConverter(ref comm
 	}
 }
 
-func schema_pkg_apis_kosmos_v1alpha1_VirtualCluster(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"kind": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
-							Type:        []string{"string"},
-							Format:      "",
-						},
-					},
-					"apiVersion": {
-						SchemaProps: spec.SchemaProps{
-							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
-							Type:        []string{"string"},
-							Format:      "",
-						},
-					},
-					"metadata": {
-						SchemaProps: spec.SchemaProps{
-							Default: map[string]interface{}{},
-							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
-						},
-					},
-					"spec": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Spec is the specification for the behaviour of the VirtualCluster.",
-							Default:     map[string]interface{}{},
-							Ref:         ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterSpec"),
-						},
-					},
-					"status": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Status describes the current status of a VirtualCluster.",
-							Default:     map[string]interface{}{},
-							Ref:         ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterStatus"),
-						},
-					},
-				},
-				Required: []string{"spec"},
-			},
-		},
-		Dependencies: []string{
-			"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterSpec", "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
-	}
-}
-
-func schema_pkg_apis_kosmos_v1alpha1_VirtualClusterList(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"kind": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
-							Type:        []string{"string"},
-							Format:      "",
-						},
-					},
-					"apiVersion": {
-						SchemaProps: spec.SchemaProps{
-							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualCluster"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualCluster", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_kosmos_v1alpha1_VirtualClusterPlugin(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "VirtualClusterPluginSpec is the specification for a VirtualClusterPlugin resource", - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterPluginSpec"), - }, - }, - }, - Required: []string{"spec"}, - }, - }, - Dependencies: []string{ - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterPluginSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_kosmos_v1alpha1_VirtualClusterPluginList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterPlugin"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.VirtualClusterPlugin", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_kosmos_v1alpha1_VirtualClusterPluginSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "pluginSources": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginSources"), - }, - }, - "successStateCommand": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginSources"}, - } -} - -func schema_pkg_apis_kosmos_v1alpha1_VirtualClusterSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kubeconfig": { - SchemaProps: spec.SchemaProps{ - Description: "Kubeconfig is the kubeconfig of the virtual kubernetes's control plane", - Type: []string{"string"}, - Format: "", - }, - }, - "externalIP": { - SchemaProps: spec.SchemaProps{ - Description: "ExternalIP is the external ip of the virtual kubernetes's control plane", - Type: []string{"string"}, - Format: "", - }, - }, - "externalIps": { - SchemaProps: spec.SchemaProps{ - Description: "ExternalIps is the external ips of the virtual kubernetes's control plane", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "kubeInKubeConfig": { - SchemaProps: spec.SchemaProps{ - Description: "KubeInKubeConfig is the external config of virtual cluster", - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.KubeInKubeConfig"), - }, - }, - "promotePolicies": { - SchemaProps: spec.SchemaProps{ - Description: "PromotePolicies defines the policies for promotion to the kubernetes's control plane", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicy"), - }, - }, - }, - }, - }, - "promoteResources": { - SchemaProps: spec.SchemaProps{ - Description: "PromoteResources defines the resources for promotion to the kubernetes's control plane; the resources can be nodes or just cpu, memory, or gpu resources", - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromoteResources"), - }, - }, - 
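// Editor's note: the promotePolicies and promoteResources properties above reference two
// types whose schemas are deleted earlier in this hunk. A hedged reconstruction (json tags
// and the pointer on LabelSelector are assumptions; assumes metav1
// "k8s.io/apimachinery/pkg/apis/meta/v1" and resource "k8s.io/apimachinery/pkg/api/resource"):
type PromotePolicy struct {
	// LabelSelector is used to select nodes that are eligible for promotion to the kubernetes's control plane.
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
	// NodeCount is the number of nodes to promote to the kubernetes's control plane
	NodeCount int32 `json:"nodeCount"`
}

type PromoteResources struct {
	// NodeInfos is the info of nodes to promote to the kubernetes's control plane
	NodeInfos []NodeInfo `json:"nodeInfos,omitempty"`
	// Resources is the resources to promote to the kubernetes's control plane
	Resources map[string]resource.Quantity `json:"resources,omitempty"`
}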
"pluginSet": { - SchemaProps: spec.SchemaProps{ - Description: "PluginSet is the list of plugins that will be used by the virtual kubernetes's control plane If plugins is nil or empty, all default plugins will be used", - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginSet"), - }, - }, - "pluginOptions": { - SchemaProps: spec.SchemaProps{ - Description: "datasource for plugin yaml", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginOptions"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.KubeInKubeConfig", "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginOptions", "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PluginSet", "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicy", "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromoteResources"}, - } -} - -func schema_pkg_apis_kosmos_v1alpha1_VirtualClusterStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase is the phase of kosmos-operator handling the VirtualCluster", - Type: []string{"string"}, - Format: "", - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "updateTime": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "port": { - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int32", - }, - }, - "portMap": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - "vipMap": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - func schema_pkg_apis_kosmos_v1alpha1_VxlanCIDRs(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3611,27 +2892,6 @@ func schema_pkg_apis_kosmos_v1alpha1_VxlanCIDRs(ref common.ReferenceCallback) co } } -func schema_pkg_apis_kosmos_v1alpha1_Yaml(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Storage"), - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{ - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Storage"}, - } -} - func schema_pkg_apis_meta_v1_APIGroup(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git 
a/pkg/kubenest/common/resource.go b/pkg/kubenest/common/resource.go deleted file mode 100644 index 6a53a0ad9..000000000 --- a/pkg/kubenest/common/resource.go +++ /dev/null @@ -1,14 +0,0 @@ -package common - -import ( - "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" -) - -type APIServerExternalResource struct { - Namespace string - Name string - Vc *v1alpha1.VirtualCluster - RootClientSet kubernetes.Interface -} diff --git a/pkg/kubenest/constants/constant.go b/pkg/kubenest/constants/constant.go deleted file mode 100644 index 7d4c48dc8..000000000 --- a/pkg/kubenest/constants/constant.go +++ /dev/null @@ -1,162 +0,0 @@ -package constants - -import ( - "time" - - corev1 "k8s.io/api/core/v1" - - "github.com/kosmos.io/kosmos/pkg/utils" -) - -const ( - InitControllerName = "virtual-cluster-init-controller" - NodeControllerName = "virtual-cluster-node-controller" - GlobalNodeControllerName = "global-node-controller" - KosmosJoinControllerName = "kosmos-join-controller" - KosmosNs = "kosmos-system" - SystemNs = "kube-system" - DefaultNs = "default" - DefaultImageRepositoryEnv = "IMAGE_REPOSITIRY" - DefaultImageVersionEnv = "IMAGE_VERSION" - DefaultCoreDNSImageTagEnv = "COREDNS_IMAGE_TAG" - DefaultVirtualControllerLabelEnv = "VIRTUAL_CONTROLLER_LABEL" - VirtualClusterFinalizerName = "kosmos.io/virtual-cluster-finalizer" - ServiceType = "NodePort" - EtcdServiceType = "ClusterIP" - DisableCascadingDeletionLabel = "operator.virtualcluster.io/disable-cascading-deletion" - ControllerFinalizerName = "operator.virtualcluster.io/finalizer" - DefaultKubeconfigPath = "/etc/cluster-tree/cert" - Label = "virtualCluster-app" - LabelValue = "apiserver" - ComponentBeReadyTimeout = 300 * time.Second - ComponentBeDeletedTimeout = 300 * time.Second - - // CertificateBlockType is a possible value for pem.Block.Type. 
- CertificateBlockType = "CERTIFICATE" - RsaKeySize = 2048 - KeyExtension = ".key" - CertExtension = ".crt" - CertificateValidity = time.Hour * 24 * 365 * 100 - CaCertAndKeyName = "ca" - VirtualClusterCertAndKeyName = "virtualCluster" - VirtualClusterSystemNamespace = "virtual-cluster-system" - ApiserverCertAndKeyName = "apiserver" - EtcdCaCertAndKeyName = "etcd-ca" - EtcdServerCertAndKeyName = "etcd-server" - EtcdClientCertAndKeyName = "etcd-client" - FrontProxyCaCertAndKeyName = "front-proxy-ca" - FrontProxyClientCertAndKeyName = "front-proxy-client" - ProxyServerCertAndKeyName = "proxy-server" - - //controlplane apiserver - APIServer = "apiserver" - APIServerAnp = "apiserver-anp" - APIServerEtcdListenClientPort = 2379 - APIServerServiceType = "NodePort" - // APIServerCallRetryInterval defines how long kubeadm should wait before retrying a failed API operation - APIServerCallRetryInterval = 100 * time.Millisecond - APIServerSVCPortName = "client" - - //install kube-proxy in virtualCluster - Proxy = "kube-proxy" - // configmap kube-proxy clustercidr - - //controlplane etcd - Etcd = "etcd" - EtcdReplicas = 3 - EtcdDataVolumeName = "etcd-data" - EtcdListenClientPort = 2379 - EtcdListenPeerPort = 2380 - EtcdSuffix = "-etcd-client" - - //controlplane kube-controller - KubeControllerReplicas = 2 - KubeControllerManagerComponent = "KubeControllerManager" - KubeControllerManager = "kube-controller-manager" - KubeControllerManagerClusterCIDR = "10.244.0.0/16" - - //controlplane scheduler - VirtualClusterSchedulerReplicas = 2 - VirtualClusterSchedulerComponent = "VirtualClusterScheduler" - VirtualClusterSchedulerComponentConfigMap = "scheduler-config" - VirtualClusterScheduler = "scheduler" - VirtualClusterKubeProxyComponent = "kube-proxy" - - //controlplane auth - AdminConfig = "admin-config" - KubeConfig = "kubeconfig" - KubeProxyConfigmap = "kube-proxy" - - //controlplane upload - VirtualClusterLabelKeyName = "app.kubernetes.io/managed-by" - VirtualClusterController = "virtual-cluster-controller" - ClusterName = "virtualCluster-apiserver" - UserName = "virtualCluster-admin" - - // InitAction represents init virtual cluster instance - InitAction Action = "init" - // DeInitAction represents delete virtual cluster instance - DeInitAction Action = "deInit" - - //host_port_manager - HostPortsCMName = "kosmos-hostports" - HostPortsCMDataName = "config.yaml" - APIServerPortKey = "apiserver-port" - APIServerNetworkProxyAgentPortKey = "apiserver-network-proxy-agent-port" - APIServerNetworkProxyServerPortKey = "apiserver-network-proxy-server-port" - APIServerNetworkProxyHealthPortKey = "apiserver-network-proxy-health-port" - APIServerNetworkProxyAdminPortKey = "apiserver-network-proxy-admin-port" - VirtualClusterPortNum = 5 - - // vip - VipPoolConfigMapName = "kosmos-vip-pool" - VipPoolKey = "vip-config.yaml" - VcVipStatusKey = "vip-key" - VipKeepAlivedNodeLabelKey = "kosmos.io/keepalived-node" - VipKeepAlivedNodeLabelValue = "true" - VipKeepAlivedNodeRoleKey = "kosmos.io/keepalived-role" - VipKeepAlivedNodeRoleMaster = "master" - VipKeepalivedNodeRoleBackup = "backup" - VipKeepAlivedReplicas = 3 - VipKeepalivedComponentName = "keepalived" - - ManifestComponentsConfigMap = "components-manifest-cm" - - WaitAllPodsRunningTimeoutSeconds = 1800 - - // core-dns - KubeDNSSVCName = "kube-dns" - // nolint - HostCoreDnsComponents = "host-core-dns-components" - VirtualCoreDNSComponents = "virtual-core-dns-components" - PrometheusRuleManifest = "prometheus-rules" - TenantCoreDNSComponentName = "core-dns-tenant" - - 
StateLabelKey = "kosmos-io/state" - - KonnectivityServerSuffix = "konnectivity-server" - - //in virtual cluster - APIServerExternalService = "api-server-external-service" - - //nodelocaldns - NodeLocalDNSComponentName = "virtual-node-local-dns" - NodeLocalDNSIp = "169.254.20.10" - NodeLocalDNSClusterDomain = "cluster.local" - NodeLocalDNSService = "__PILLAR__DNS__SERVER__" -) - -type Action string - -var APIServerServiceSubnet string -var KubeControllerManagerPodSubnet string - -var PreferredAddressType corev1.NodeAddressType - -func init() { - APIServerServiceSubnet = utils.GetEnvWithDefaultValue("SERVICE_SUBNET", "10.237.6.0/18") - // fd11:1122:1111::/48, - KubeControllerManagerPodSubnet = utils.GetEnvWithDefaultValue("POD_SUBNET", "10.244.0.0/16") - - PreferredAddressType = corev1.NodeAddressType(utils.GetEnvWithDefaultValue("PREFERRED_ADDRESS_TYPE", string(corev1.NodeInternalIP))) -} diff --git a/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller.go b/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller.go deleted file mode 100644 index 2dff3ae5c..000000000 --- a/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller.go +++ /dev/null @@ -1,233 +0,0 @@ -package endpointcontroller - -import ( - "context" - "fmt" - "reflect" - - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/klog/v2" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -type NodeGetter interface { - GetAPIServerNodes(client kubernetes.Interface, namespace string) (*v1.NodeList, error) -} - -type RealNodeGetter struct{} - -func (r *RealNodeGetter) GetAPIServerNodes(client kubernetes.Interface, namespace string) (*v1.NodeList, error) { - return util.GetAPIServerNodes(client, namespace) -} - -type APIServerExternalSyncController struct { - client.Client - EventRecorder record.EventRecorder - KubeClient kubernetes.Interface - NodeGetter NodeGetter -} - -const APIServerExternalSyncControllerName string = "api-server-external-service-sync-controller" - -func (e *APIServerExternalSyncController) SetupWithManager(mgr manager.Manager) error { - return controllerruntime.NewControllerManagedBy(mgr). - Named(APIServerExternalSyncControllerName). - WithOptions(controller.Options{MaxConcurrentReconciles: 5}). - Watches(&source.Kind{Type: &v1.Pod{}}, handler.EnqueueRequestsFromMapFunc(e.newPodMapFunc())). - Complete(e) -} - -func (e *APIServerExternalSyncController) newPodMapFunc() handler.MapFunc { - return func(obj client.Object) []reconcile.Request { - pod, ok := obj.(*v1.Pod) - - if !ok { - klog.Warningf("Object is not a Pod, skipping: %v", obj) - return nil - } - - // If the pod contains the specified label virtualCluster-app=apiserver,it indicates that it belongs to vc. 
- if val, exists := pod.Labels[constants.Label]; exists && val == constants.LabelValue { - return []reconcile.Request{ - { - NamespacedName: client.ObjectKey{ - Name: pod.Name, - Namespace: pod.Namespace, - }, - }, - } - } - - return nil - } -} - -func (e *APIServerExternalSyncController) SyncAPIServerExternalEndpoints(ctx context.Context, k8sClient kubernetes.Interface, vc *v1alpha1.VirtualCluster) error { - if e.NodeGetter == nil { - return fmt.Errorf("NodeGetter is nil") - } - - nodes, err := e.NodeGetter.GetAPIServerNodes(e.KubeClient, vc.Namespace) - if err != nil { - return fmt.Errorf("failed to get API server nodes: %w", err) - } - - if len(nodes.Items) == 0 { - return fmt.Errorf("no API server nodes found in the cluster") - } - - var addresses []v1.EndpointAddress - for _, node := range nodes.Items { - for _, address := range node.Status.Addresses { - if address.Type == v1.NodeInternalIP { - addresses = append(addresses, v1.EndpointAddress{ - IP: address.Address, - }) - } - } - } - - if len(addresses) == 0 { - return fmt.Errorf("no internal IP addresses found for the API server nodes") - } - - apiServerPort, ok := vc.Status.PortMap[constants.APIServerPortKey] - if !ok { - return fmt.Errorf("failed to get API server port from VirtualCluster status") - } - klog.V(4).Infof("API server port: %d", apiServerPort) - - newEndpoint := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - }, - Subsets: []v1.EndpointSubset{ - { - Addresses: addresses, - Ports: []v1.EndpointPort{ - { - Name: "https", - Port: apiServerPort, - Protocol: v1.ProtocolTCP, - }, - }, - }, - }, - } - - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - _, err := k8sClient.CoreV1().Namespaces().Get(ctx, constants.KosmosNs, metav1.GetOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to get namespace kosmos-system: %w", err) - } - - currentEndpoint, err := k8sClient.CoreV1().Endpoints(constants.DefaultNs).Get(ctx, constants.APIServerExternalService, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(4).Info("No endpoint found in default namespace, skipping") - return nil - } - return fmt.Errorf("failed to get endpoint in default: %w", err) - } - - if !reflect.DeepEqual(currentEndpoint.Subsets, newEndpoint.Subsets) { - newEndpoint.ObjectMeta.Namespace = constants.DefaultNs - newEndpoint.ObjectMeta.ResourceVersion = currentEndpoint.ResourceVersion - _, err = k8sClient.CoreV1().Endpoints(constants.DefaultNs).Update(ctx, newEndpoint, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("failed to update endpoint in default: %w", err) - } - klog.V(4).Info("Updated api-server-external-service Endpoint in default") - } else { - klog.V(4).Info("No changes detected in default Endpoint, skipping update") - } - return nil - } - - currentEndpoint, err := k8sClient.CoreV1().Endpoints(constants.KosmosNs).Get(ctx, constants.APIServerExternalService, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - newEndpoint.ObjectMeta.Namespace = constants.KosmosNs - _, err = k8sClient.CoreV1().Endpoints(constants.KosmosNs).Create(ctx, newEndpoint, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to create endpoint in kosmos-system: %w", err) - } - klog.V(4).Info("Created api-server-external-service Endpoint in kosmos-system") - return nil - } - return fmt.Errorf("failed to get endpoint in kosmos-system: %w", err) - } - - if !reflect.DeepEqual(currentEndpoint.Subsets, 
newEndpoint.Subsets) { - newEndpoint.ObjectMeta.Namespace = constants.KosmosNs - newEndpoint.ObjectMeta.ResourceVersion = currentEndpoint.ResourceVersion - _, err = k8sClient.CoreV1().Endpoints(constants.KosmosNs).Update(ctx, newEndpoint, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("failed to update endpoint in kosmos-system: %w", err) - } - klog.V(4).Info("Updated api-server-external-service Endpoint in kosmos-system") - } else { - klog.V(4).Info("No changes detected in kosmos-system Endpoint, skipping update") - } - return nil - }) -} - -func (e *APIServerExternalSyncController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(4).Infof("============ %s start to reconcile %s ============", APIServerExternalSyncControllerName, request.NamespacedName) - defer klog.V(4).Infof("============ %s finish to reconcile %s ============", APIServerExternalSyncControllerName, request.NamespacedName) - - var vcList v1alpha1.VirtualClusterList - if err := e.List(ctx, &vcList, client.InNamespace(request.NamespacedName.Namespace)); err != nil { - klog.Errorf("Failed to list VirtualClusters in namespace %s: %v", request.NamespacedName.Namespace, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - if len(vcList.Items) == 0 { - klog.V(4).Infof("No VirtualCluster found in namespace %s", request.NamespacedName.Namespace) - return reconcile.Result{}, nil - } - - // A namespace should correspond to only one virtual cluster (vc). If it corresponds to multiple vcs, it indicates an error. - if len(vcList.Items) > 1 { - klog.Errorf("Multiple VirtualClusters found in namespace %s, expected only one", request.NamespacedName.Namespace) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - vc := vcList.Items[0] - - if vc.Status.Phase != v1alpha1.Completed { - klog.V(4).Infof("VirtualCluster %s is not in Completed phase", vc.Name) - return reconcile.Result{}, nil - } - - k8sClient, err := util.GenerateKubeclient(&vc) - if err != nil { - klog.Errorf("Failed to generate Kubernetes client for VirtualCluster %s: %v", vc.Name, err) - return reconcile.Result{}, nil - } - - if err := e.SyncAPIServerExternalEndpoints(ctx, k8sClient, &vc); err != nil { - klog.Errorf("Failed to sync apiserver external Endpoints: %v", err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - return reconcile.Result{}, nil -} diff --git a/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller_test.go b/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller_test.go deleted file mode 100644 index 64f13b01c..000000000 --- a/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package endpointcontroller - -import ( - "context" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -type MockNodeGetter struct { - Nodes *corev1.NodeList - Err error -} - -func (m *MockNodeGetter) GetAPIServerNodes(_ kubernetes.Interface, _ string) (*corev1.NodeList, error) { - return m.Nodes, m.Err -} - -func TestSyncAPIServerExternalEndpoints(t *testing.T) { - ctx := context.TODO() - vc := 
&v1alpha1.VirtualCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-vc", - Namespace: "test-ns", - }, - Status: v1alpha1.VirtualClusterStatus{ - Phase: v1alpha1.Completed, - PortMap: map[string]int32{ - constants.APIServerPortKey: 6443, - }, - }, - } - - nodes := &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Status: corev1.NodeStatus{ - Addresses: []corev1.NodeAddress{ - {Type: corev1.NodeInternalIP, Address: "192.168.1.1"}, - }, - }, - }, - }, - } - - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - Namespace: constants.KosmosNs, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - {IP: "192.168.1.1"}, - }, - Ports: []corev1.EndpointPort{ - {Name: "https", Port: 6443, Protocol: corev1.ProtocolTCP}, - }, - }, - }, - } - - kosmosNsObj := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.KosmosNs, - }, - } - - tests := []struct { - name string - objects []runtime.Object - mockNodes *corev1.NodeList - mockErr error - wantErr bool - wantErrString string - wantSubsets []corev1.EndpointSubset - setupKosmosNs bool - }{ - { - name: "Successfully syncs external endpoints in kosmos-system", - objects: []runtime.Object{kosmosNsObj}, - mockNodes: nodes, - wantSubsets: endpoint.Subsets, - setupKosmosNs: true, - }, - { - name: "Successfully syncs external endpoints in default when kosmos-system not exists", - objects: []runtime.Object{}, - mockNodes: nodes, - wantSubsets: endpoint.Subsets, - setupKosmosNs: false, - }, - { - name: "Updates existing endpoint in kosmos-system", - objects: []runtime.Object{ - kosmosNsObj, - &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - Namespace: constants.KosmosNs, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{{IP: "192.168.1.2"}}, - Ports: []corev1.EndpointPort{{Name: "https", Port: 6443, Protocol: corev1.ProtocolTCP}}, - }, - }, - }, - }, - mockNodes: nodes, - wantSubsets: endpoint.Subsets, - setupKosmosNs: true, - }, - { - name: "Updates existing endpoint in default namespace", - objects: []runtime.Object{ - &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - Namespace: constants.DefaultNs, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{{IP: "192.168.1.2"}}, - Ports: []corev1.EndpointPort{{Name: "https", Port: 6443, Protocol: corev1.ProtocolTCP}}, - }, - }, - }, - }, - mockNodes: nodes, - wantSubsets: endpoint.Subsets, - setupKosmosNs: false, - }, - { - name: "Does not update endpoint if no changes", - objects: []runtime.Object{endpoint}, - mockNodes: nodes, - wantSubsets: endpoint.Subsets, - }, - { - name: "Updates endpoint if changes detected", - objects: []runtime.Object{ - func() runtime.Object { - modifiedEndpoint := endpoint.DeepCopy() - modifiedEndpoint.Subsets[0].Addresses[0].IP = "192.168.1.2" - return modifiedEndpoint - }(), - }, - mockNodes: nodes, - wantSubsets: endpoint.Subsets, - }, - { - name: "Fails if no API server nodes are found", - objects: []runtime.Object{}, - mockNodes: &corev1.NodeList{}, - wantErr: true, - wantErrString: "no API server nodes found in the cluster", - }, - { - name: "Fails if no internal IP addresses are found", - objects: []runtime.Object{}, - mockNodes: &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{Name: "node-2"}, - Status: corev1.NodeStatus{ - Addresses: 
[]corev1.NodeAddress{}, - }, - }, - }, - }, - wantErr: true, - wantErrString: "no internal IP addresses found for the API server nodes", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fakeHostClusterClient := fake.NewSimpleClientset(tt.objects...) - fakeVCClient := fake.NewSimpleClientset() - mockNodeGetter := &MockNodeGetter{Nodes: tt.mockNodes, Err: tt.mockErr} - - controller := &APIServerExternalSyncController{ - KubeClient: fakeHostClusterClient, - NodeGetter: mockNodeGetter, - } - - err := controller.SyncAPIServerExternalEndpoints(ctx, fakeVCClient, vc) - if tt.wantErr { - assert.Error(t, err) - if tt.wantErrString != "" { - assert.Contains(t, err.Error(), tt.wantErrString) - } - } else { - assert.NoError(t, err) - if tt.wantSubsets != nil { - var createdEndpoint *corev1.Endpoints - var err error - - if tt.setupKosmosNs { - createdEndpoint, err = fakeVCClient.CoreV1().Endpoints(constants.KosmosNs).Get(ctx, constants.APIServerExternalService, metav1.GetOptions{}) - } else { - createdEndpoint, err = fakeVCClient.CoreV1().Endpoints(constants.DefaultNs).Get(ctx, constants.APIServerExternalService, metav1.GetOptions{}) - } - - if err == nil { - assert.True(t, reflect.DeepEqual(createdEndpoint.Subsets, tt.wantSubsets)) - } - } - } - }) - } -} diff --git a/pkg/kubenest/controller/endpoints.sync.controller/coredns_sync_controller.go b/pkg/kubenest/controller/endpoints.sync.controller/coredns_sync_controller.go deleted file mode 100644 index 23822357c..000000000 --- a/pkg/kubenest/controller/endpoints.sync.controller/coredns_sync_controller.go +++ /dev/null @@ -1,202 +0,0 @@ -package endpointcontroller - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/klog" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -type CoreDNSController struct { - client.Client - EventRecorder record.EventRecorder -} - -const CoreDNSSyncControllerName = "virtual-cluster-coredns-sync-controller" - -func (e *CoreDNSController) SetupWithManager(mgr manager.Manager) error { - skipEvent := func(obj client.Object) bool { - // Only handle the "kube-dns" service with namespacing - return obj.GetName() == constants.KubeDNSSVCName && obj.GetNamespace() != "" - } - - return controllerruntime.NewControllerManagedBy(mgr). - Named(CoreDNSSyncControllerName). - WithOptions(controller.Options{MaxConcurrentReconciles: 5}). - For(&v1.Service{}, - builder.WithPredicates(predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return skipEvent(createEvent.Object) - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { return skipEvent(updateEvent.ObjectNew) }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { return false }, - })). 
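// Editor's note: the table-driven test deleted just above seeds a fake clientset per case
// with the objects each scenario expects. The essence, reduced to a self-contained,
// runnable sketch (names here are illustrative, not from the deleted file; assumes
// context, testing, corev1 "k8s.io/api/core/v1", metav1
// "k8s.io/apimachinery/pkg/apis/meta/v1", fake "k8s.io/client-go/kubernetes/fake"):
func TestSeededFakeClientset(t *testing.T) {
	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kosmos-system"}}
	cs := fake.NewSimpleClientset(ns) // pre-load the namespace the code under test checks for
	got, err := cs.CoreV1().Namespaces().Get(context.TODO(), "kosmos-system", metav1.GetOptions{})
	if err != nil || got.Name != "kosmos-system" {
		t.Fatalf("unexpected result: %v, %v", got, err)
	}
}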
- Complete(e) -} - -func (e *CoreDNSController) SyncVirtualClusterSVC(ctx context.Context, k8sClient kubernetes.Interface, DNSPort int32, DNSTCPPort int32, MetricsPort int32) error { - virtualClusterSVC, err := k8sClient.CoreV1().Services(constants.SystemNs).Get(ctx, constants.KubeDNSSVCName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("get virtualcluster svc %s failed: %v", constants.KubeDNSSVCName, err) - } - - if virtualClusterSVC.Spec.Ports == nil { - return fmt.Errorf("svc %s ports is nil", constants.KubeDNSSVCName) - } - - updateSVC := virtualClusterSVC.DeepCopy() - - for i, port := range virtualClusterSVC.Spec.Ports { - if port.Name == "dns" { - updateSVC.Spec.Ports[i].TargetPort = intstr.IntOrString{Type: intstr.Int, IntVal: DNSPort} - } - if port.Name == "dns-tcp" { - updateSVC.Spec.Ports[i].TargetPort = intstr.IntOrString{Type: intstr.Int, IntVal: DNSTCPPort} - } - if port.Name == "metrics" { - updateSVC.Spec.Ports[i].TargetPort = intstr.IntOrString{Type: intstr.Int, IntVal: MetricsPort} - } - } - - if _, err := k8sClient.CoreV1().Services(constants.SystemNs).Update(ctx, updateSVC, metav1.UpdateOptions{}); err != nil { - return err - } - - return nil -} - -func (e *CoreDNSController) SyncVirtualClusterEPS(ctx context.Context, k8sClient kubernetes.Interface, DNSPort int32, DNSTCPPort int32, MetricsPort int32) error { - virtualEndPoints, err := k8sClient.CoreV1().Endpoints(constants.SystemNs).Get(ctx, constants.KubeDNSSVCName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("get virtualcluster eps %s failed: %v", constants.KubeDNSSVCName, err) - } - - if len(virtualEndPoints.Subsets) != 1 { - return fmt.Errorf("eps %s Subsets length is not 1", constants.KubeDNSSVCName) - } - - if virtualEndPoints.Subsets[0].Ports == nil { - return fmt.Errorf("eps %s ports length is nil", constants.KubeDNSSVCName) - } - - updateEPS := virtualEndPoints.DeepCopy() - - for i, port := range virtualEndPoints.Subsets[0].Ports { - if port.Name == "dns" { - updateEPS.Subsets[0].Ports[i].Port = DNSPort - } - if port.Name == "dns-tcp" { - updateEPS.Subsets[0].Ports[i].Port = DNSTCPPort - } - if port.Name == "metrics" { - updateEPS.Subsets[0].Ports[i].Port = MetricsPort - } - } - - if _, err := k8sClient.CoreV1().Endpoints(constants.SystemNs).Update(ctx, updateEPS, metav1.UpdateOptions{}); err != nil { - return err - } - - return nil -} - -func (e *CoreDNSController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(4).Infof("============ %s start to reconcile %s ============", CoreDNSSyncControllerName, request.NamespacedName) - defer klog.V(4).Infof("============ %s finish to reconcile %s ============", CoreDNSSyncControllerName, request.NamespacedName) - - // Find the corresponding virtualcluster based on the namespace of SVC - var virtualClusterList v1alpha1.VirtualClusterList - if err := e.List(ctx, &virtualClusterList); err != nil { - if apierrors.IsNotFound(err) { - return reconcile.Result{}, nil - } - klog.V(4).Infof("query virtualcluster failed: %v", err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - var targetVirtualCluster v1alpha1.VirtualCluster - hasVirtualCluster := false - for _, vc := range virtualClusterList.Items { - if vc.Namespace == request.Namespace { - targetVirtualCluster = vc - hasVirtualCluster = true - break - } - } - if !hasVirtualCluster { - klog.V(4).Infof("virtualcluster %s not found", request.Namespace) - return reconcile.Result{}, nil - } - - if 
targetVirtualCluster.Status.Phase != v1alpha1.AllNodeReady && targetVirtualCluster.Status.Phase != v1alpha1.Completed { - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - if targetVirtualCluster.Spec.KubeInKubeConfig != nil && targetVirtualCluster.Spec.KubeInKubeConfig.UseTenantDNS { - return reconcile.Result{}, nil - } - - // Get the corresponding svc - var kubesvc v1.Service - if err := e.Get(ctx, request.NamespacedName, &kubesvc); err != nil { - klog.V(4).Infof("get kubesvc %s failed: %v", request.NamespacedName, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - dnsPort := int32(0) - dnsTCPPort := int32(0) - metricsPort := int32(0) - - for _, port := range kubesvc.Spec.Ports { - if port.Name == "dns" { - dnsPort = port.NodePort - } - if port.Name == "dns-tcp" { - dnsTCPPort = port.NodePort - } - if port.Name == "metrics" { - metricsPort = port.NodePort - } - } - - k8sClient, err := util.GenerateKubeclient(&targetVirtualCluster) - if err != nil { - klog.Errorf("virtualcluster %s crd kubernetes client failed: %v", targetVirtualCluster.Name, err) - return reconcile.Result{}, nil - } - - // do sync - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - return e.SyncVirtualClusterEPS(ctx, k8sClient, dnsPort, dnsTCPPort, metricsPort) - }); err != nil { - klog.Errorf("virtualcluster %s sync virtualcluster endpoints failed: %v", targetVirtualCluster.Name, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - return e.SyncVirtualClusterSVC(ctx, k8sClient, dnsPort, dnsTCPPort, metricsPort) - }); err != nil { - klog.Errorf("virtualcluster %s sync virtualcluster svc failed: %v", targetVirtualCluster.Name, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - return reconcile.Result{}, nil -} diff --git a/pkg/kubenest/controller/endpoints.sync.controller/konnectivity_sync_controller.go b/pkg/kubenest/controller/endpoints.sync.controller/konnectivity_sync_controller.go deleted file mode 100644 index 829a9efe1..000000000 --- a/pkg/kubenest/controller/endpoints.sync.controller/konnectivity_sync_controller.go +++ /dev/null @@ -1,146 +0,0 @@ -package endpointcontroller - -import ( - "context" - "fmt" - "strings" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/klog" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -type KonnectivityController struct { - client.Client - EventRecorder record.EventRecorder -} - -const KonnectivitySyncControllerName = "virtual-cluster-konnectivity-sync-controller" - -func (e *KonnectivityController) SetupWithManager(mgr manager.Manager) error { - skipEvent := func(obj client.Object) bool { - // Only handle the "konnectivity-server" endpoints - return 
strings.HasSuffix(obj.GetName(), constants.KonnectivityServerSuffix) - } - - return controllerruntime.NewControllerManagedBy(mgr). - Named(KonnectivitySyncControllerName). - WithOptions(controller.Options{MaxConcurrentReconciles: 5}). - For(&v1.Endpoints{}, - builder.WithPredicates(predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return skipEvent(createEvent.Object) - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { return skipEvent(updateEvent.ObjectNew) }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { return false }, - })). - Complete(e) -} - -func (e *KonnectivityController) SyncVirtualClusterEPS(ctx context.Context, k8sClient kubernetes.Interface, eps v1.Endpoints) error { - virtualEndPoints, err := k8sClient.CoreV1().Endpoints(constants.SystemNs).Get(ctx, constants.KonnectivityServerSuffix, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("get virtualcluster eps %s failed: %v", constants.KonnectivityServerSuffix, err) - } - - if len(virtualEndPoints.Subsets) == 0 { - return fmt.Errorf("virtualcluster eps %s has no subsets", constants.KonnectivityServerSuffix) - } - - if len(virtualEndPoints.Subsets[0].Ports) == 0 { - return fmt.Errorf("virtualcluster eps %s has no ports", constants.KonnectivityServerSuffix) - } - - // fix bug: https://github.com/kosmos-io/kosmos/issues/683 - if len(eps.Subsets) == 0 { - return fmt.Errorf("eps %s has no subsets", eps.Name) - } - - // only sync the address of the konnectivity-server endpoints - targetPort := virtualEndPoints.Subsets[0].Ports[0].Port - updateEPS := virtualEndPoints.DeepCopy() - - copyFromEPS := eps.DeepCopy() - updateEPS.Subsets = copyFromEPS.Subsets - for i := range updateEPS.Subsets { - if len(updateEPS.Subsets[i].Ports) == 0 { - continue - } - updateEPS.Subsets[i].Ports[0].Port = targetPort - } - - if _, err := k8sClient.CoreV1().Endpoints(constants.SystemNs).Update(ctx, updateEPS, metav1.UpdateOptions{}); err != nil { - return err - } - - return nil -} - -func (e *KonnectivityController) GetVirtualCluster(ctx context.Context, eps v1.Endpoints) (*v1alpha1.VirtualCluster, error) { - virtualClusterName := strings.TrimSuffix(eps.GetName(), "-"+constants.KonnectivityServerSuffix) - virtualClusterNamespace := eps.GetNamespace() - var vc v1alpha1.VirtualCluster - if err := e.Get(ctx, types.NamespacedName{Name: virtualClusterName, Namespace: virtualClusterNamespace}, &vc); err != nil { - return nil, err - } - return &vc, nil -} - -func (e *KonnectivityController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(4).Infof("============ %s start to reconcile %s ============", KonnectivitySyncControllerName, request.NamespacedName) - defer klog.V(4).Infof("============ %s finish to reconcile %s ============", KonnectivitySyncControllerName, request.NamespacedName) - - // Get the corresponding endpoints - var kubeEPS v1.Endpoints - if err := e.Get(ctx, request.NamespacedName, &kubeEPS); err != nil { - klog.V(4).Infof("get kubeEPS %s failed: %v", request.NamespacedName, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - targetVirtualCluster, err := e.GetVirtualCluster(ctx, kubeEPS) - if err != nil { - klog.V(4).Infof("query virtualcluster failed: %v", err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - if targetVirtualCluster.Status.Phase != v1alpha1.AllNodeReady && targetVirtualCluster.Status.Phase != v1alpha1.Completed { - return reconcile.Result{RequeueAfter: 
utils.DefaultRequeueTime}, nil - } - - if targetVirtualCluster.Spec.KubeInKubeConfig != nil && targetVirtualCluster.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort { - return reconcile.Result{}, nil - } - - k8sClient, err := util.GenerateKubeclient(targetVirtualCluster) - if err != nil { - klog.Errorf("virtualcluster %s crd kubernetes client failed: %v", targetVirtualCluster.Name, err) - return reconcile.Result{}, nil - } - - // // do sync - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - return e.SyncVirtualClusterEPS(ctx, k8sClient, kubeEPS) - }); err != nil { - klog.Errorf("virtualcluster %s sync virtualcluster svc failed: %v", targetVirtualCluster.Name, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - return reconcile.Result{}, nil -} diff --git a/pkg/kubenest/controller/global.node.controller/global_node_controller.go b/pkg/kubenest/controller/global.node.controller/global_node_controller.go deleted file mode 100644 index 8ce8a6976..000000000 --- a/pkg/kubenest/controller/global.node.controller/global_node_controller.go +++ /dev/null @@ -1,307 +0,0 @@ -package globalnodecontroller - -import ( - "context" - "reflect" - - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - env "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/env" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -type GlobalNodeController struct { - client.Client - RootClientSet kubernetes.Interface - EventRecorder record.EventRecorder - KosmosClient versioned.Interface -} - -// compareMaps compares two map[string]string and returns true if they are equal -func compareMaps(map1, map2 map[string]string) bool { - // If lengths are different, the maps are not equal - if len(map1) != len(map2) { - return false - } - - // Iterate over map1 and check if all keys and values are present in map2 - for key, value1 := range map1 { - if value2, ok := map2[key]; !ok || value1 != value2 { - return false - } - } - - // If no discrepancies are found, the maps are equal - return true -} - -// CustomPredicateForGlobalNode is used for event filtering of the GlobalNode resource. 
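// Editor's note: the predicate defined just below filters update events by comparing old
// and new Spec with reflect.DeepEqual. controller-runtime ships a close built-in,
// predicate.GenerationChangedPredicate, which passes updates only when metadata.generation
// changed; for CRDs with the status subresource enabled, generation bumps track spec
// changes, so the two are near-equivalent (an editorial observation, not something this
// diff states; assumes builder and predicate from sigs.k8s.io/controller-runtime):
func specChangeOnly() builder.Predicates {
	return builder.WithPredicates(predicate.GenerationChangedPredicate{})
}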
-var CustomPredicateForGlobalNode = predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return true - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { - oldObj, okOld := updateEvent.ObjectOld.(*v1alpha1.GlobalNode) - newObj, okNew := updateEvent.ObjectNew.(*v1alpha1.GlobalNode) - - if !okOld || !okNew { - return true - } - - specChanged := !reflect.DeepEqual(oldObj.Spec, newObj.Spec) - - return specChanged - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return true - }, - GenericFunc: func(e event.GenericEvent) bool { - return true - }, -} - -func (r *GlobalNodeController) SetupWithManager(mgr manager.Manager) error { - if r.Client == nil { - r.Client = mgr.GetClient() - } - - return ctrl.NewControllerManagedBy(mgr). - Named(constants.GlobalNodeControllerName). - WithOptions(controller.Options{MaxConcurrentReconciles: 5}). - For(&v1.Node{}, builder.WithPredicates(predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return true - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { - oldObj := updateEvent.ObjectOld.(*v1.Node) - newObj := updateEvent.ObjectNew.(*v1.Node) - - return !compareMaps(oldObj.Labels, newObj.Labels) - }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { - return false - }, - GenericFunc: func(genericEvent event.GenericEvent) bool { - return false - }, - })). - Watches(&source.Kind{Type: &v1alpha1.GlobalNode{}}, handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { - gn := a.(*v1alpha1.GlobalNode) - - return []reconcile.Request{ - {NamespacedName: types.NamespacedName{ - Name: gn.Name, - }}, - } - }), builder.WithPredicates(CustomPredicateForGlobalNode)). - // Watches(&source.Kind{Type: &v1.Node{}}, handler.EnqueueRequestsFromMapFunc(r.newNodeMapFunc())). - Watches(&source.Kind{Type: &v1alpha1.VirtualCluster{}}, handler.EnqueueRequestsFromMapFunc(r.newVirtualClusterMapFunc())). 
- Complete(r) -} - -func (r *GlobalNodeController) newVirtualClusterMapFunc() handler.MapFunc { - return func(a client.Object) []reconcile.Request { - var requests []reconcile.Request - vcluster := a.(*v1alpha1.VirtualCluster) - if vcluster.Status.Phase != v1alpha1.Completed { - return requests - } - klog.V(4).Infof("global-node-controller: virtualcluster node changed to completed: %s", vcluster.Name) - for _, nodeInfo := range vcluster.Spec.PromoteResources.NodeInfos { - requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{ - Name: nodeInfo.NodeName, - }}) - } - return requests - } -} - -// func (r *GlobalNodeController) newNodeMapFunc() handler.MapFunc { -// return func(a client.Object) []reconcile.Request { -// var requests []reconcile.Request -// node := a.(*v1.Node) -// klog.V(4).Infof("global-node-controller: node change: %s", node.Name) -// requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{ -// Name: node.Name, -// }}) -// return requests -// } -// } - -func (r *GlobalNodeController) SyncTaint(ctx context.Context, globalNode *v1alpha1.GlobalNode) error { - if globalNode.Spec.State == v1alpha1.NodeFreeState { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - var targetNode v1.Node - if err := r.Get(ctx, types.NamespacedName{Name: globalNode.Name}, &targetNode); err != nil { - klog.Errorf("global-node-controller: SyncTaints: cannot get host node %s", globalNode.Name) - return err - } - - if targetNode.Spec.Unschedulable { - klog.V(4).Infof("global-node-controller: SyncTaints: node is unschedulable %s, skip", globalNode.Name) - return nil - } - - if _, ok := targetNode.Labels[env.GetControlPlaneLabel()]; ok { - klog.V(4).Infof("global-node-controller: SyncTaints: control-plane node %s, skip", globalNode.Name) - return nil - } - - return util.DrainNode(ctx, targetNode.Name, r.RootClientSet, &targetNode, env.GetDrainWaitSeconds(), true) - }) - return err - } - klog.V(4).Infof("global-node-controller: SyncTaints: node %s status is %s, skip", globalNode.Name, globalNode.Spec.State) - return nil -} - -func (r *GlobalNodeController) SyncState(ctx context.Context, globalNode *v1alpha1.GlobalNode) error { - if globalNode.Spec.State == v1alpha1.NodeInUse { - klog.V(4).Infof("global-node-controller: SyncState: node is in use %s, skip", globalNode.Name) - return nil - } - - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - var hostNode v1.Node - if err := r.Get(ctx, types.NamespacedName{Name: globalNode.Name}, &hostNode); err != nil { - klog.Errorf("global-node-controller: SyncState: cannot get host node %s", globalNode.Name) - return err - } - - updateHostNode := hostNode.DeepCopy() - - v, ok := updateHostNode.Labels[constants.StateLabelKey] - if ok && v == string(globalNode.Spec.State) { - return nil - } - - updateHostNode.Labels[constants.StateLabelKey] = string(globalNode.Spec.State) - if err := r.Update(ctx, updateHostNode); err != nil { - klog.Errorf("global-node-controller: SyncState: update node label failed for node %s", globalNode.Name) - return err - } - return nil - }) - return err -} - -func (r *GlobalNodeController) SyncLabel(ctx context.Context, globalNode *v1alpha1.GlobalNode) error { - if globalNode.Spec.State == v1alpha1.NodeInUse { - klog.V(4).Infof("global-node-controller: SyncLabel: node is in use %s, skip", globalNode.Name) - return nil - } - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - rootNode, err := r.RootClientSet.CoreV1().Nodes().Get(ctx, 
-		if err != nil {
-			klog.Errorf("global-node-controller: SyncLabel: cannot get root node %s", globalNode.Name)
-			return err
-		}
-
-		if _, err = r.KosmosClient.KosmosV1alpha1().GlobalNodes().Get(ctx, globalNode.Name, metav1.GetOptions{}); err != nil {
-			klog.Errorf("global-node-controller: SyncLabel: cannot get global node %s", globalNode.Name)
-			return err
-		}
-
-		// Use the management plane node labels to override the global node labels
-		updateGlobalNode := globalNode.DeepCopy()
-		if compareMaps(updateGlobalNode.Spec.Labels, rootNode.Labels) {
-			return nil
-		}
-		updateGlobalNode.Spec.Labels = rootNode.Labels
-
-		if _, err = r.KosmosClient.KosmosV1alpha1().GlobalNodes().Update(ctx, updateGlobalNode, metav1.UpdateOptions{}); err != nil {
-			klog.Errorf("global-node-controller: SyncLabel: update global node label failed, err: %v", err)
-			return err
-		}
-		return nil
-	})
-	return err
-}
-
-func (r *GlobalNodeController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
-	klog.V(4).Infof("============ global-node-controller start to reconcile %s ============", request.NamespacedName)
-	defer klog.V(4).Infof("============ global-node-controller finish to reconcile %s ============", request.NamespacedName)
-
-	var globalNode v1alpha1.GlobalNode
-	if err := r.Get(ctx, request.NamespacedName, &globalNode); err != nil {
-		if apierrors.IsNotFound(err) {
-			klog.V(4).Infof("global-node-controller: cannot find %s", request.NamespacedName)
-			// If the global node is not found, create it
-			var rootNode *v1.Node
-			if rootNode, err = r.RootClientSet.CoreV1().Nodes().Get(ctx, request.Name, metav1.GetOptions{}); err != nil {
-				klog.Errorf("global-node-controller: cannot find root node %s", request.Name)
-				return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-			}
-			globalNode.Name = request.Name
-			globalNode.Spec.State = v1alpha1.NodeReserved
-			firstNodeIP, err := utils.FindFirstNodeIPAddress(*rootNode, constants.PreferredAddressType)
-			if err != nil {
-				klog.Errorf("get first node ip address err: %s %s", constants.PreferredAddressType, err.Error())
-			}
-			globalNode.Spec.NodeIP = firstNodeIP
-			if _, err = r.KosmosClient.KosmosV1alpha1().GlobalNodes().Create(ctx, &globalNode, metav1.CreateOptions{}); err != nil {
-				klog.Errorf("global-node-controller: cannot create global node %s", globalNode.Name)
-				return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-			}
-			klog.V(4).Infof("global-node-controller: %s has been created", globalNode.Name)
-			// sync label and taint on the next requeue
-			return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-		}
-		klog.Errorf("get global-node %s error: %v", request.NamespacedName, err)
-		return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-	}
-	// if err := r.SyncState(ctx, &globalNode); err != nil {
-	// 	klog.Errorf("sync State %s error: %v", request.NamespacedName, err)
-	// 	return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-	// } else {
-	// 	klog.V(4).Infof("sync state succeeded, %s", request.NamespacedName)
-	// }
-
-	_, err := r.RootClientSet.CoreV1().Nodes().Get(ctx, globalNode.Name, metav1.GetOptions{})
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			return reconcile.Result{}, nil
-		}
-		klog.Errorf("cannot get root node %s", globalNode.Name)
-		return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-	}
-	if globalNode.Spec.State == v1alpha1.NodeInUse {
-		// wait for the globalNode to become free
-		return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-	}
-
-	if err = r.SyncLabel(ctx, &globalNode); err != nil {
-		klog.Warningf("sync label %s error: %v", request.NamespacedName, err)
-		return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-	}
-	klog.V(4).Infof("sync label succeeded, %s", request.NamespacedName)
-
-	if err = r.SyncTaint(ctx, &globalNode); err != nil {
-		klog.Errorf("sync taint %s error: %v", request.NamespacedName, err)
-		return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-	}
-	klog.V(4).Infof("sync taint succeeded, %s", request.NamespacedName)
-
-	return reconcile.Result{}, nil
-}
diff --git a/pkg/kubenest/controller/global.node.controller/global_node_lifecycle_controller.go b/pkg/kubenest/controller/global.node.controller/global_node_lifecycle_controller.go
deleted file mode 100644
index e2c842213..000000000
--- a/pkg/kubenest/controller/global.node.controller/global_node_lifecycle_controller.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package globalnodecontroller
-
-import (
-	"context"
-	"sync"
-	"time"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/util/retry"
-	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog/v2"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-
-	"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
-	"github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
-)
-
-const (
-	GlobalNodeStatusControllerName = "global-node-status-controller"
-	NodeNotReady                   = v1.NodeConditionType("NotReady")
-	NodeReady                      = v1.NodeReady
-	DefaultStatusUpdateInterval    = 15 * time.Second
-	ClientHeartbeatThreshold       = 10 * time.Second
-	nodeUpdateWorkerSize           = 8
-	RequiredNotReadyCount          = 5
-)
-
-type nodeHealthData struct {
-	notReadyCount int
-}
-
-type GlobalNodeStatusController struct {
-	root           client.Client
-	statusInterval time.Duration
-
-	kosmosClient  versioned.Interface
-	nodeHealthMap sync.Map // map[string]*nodeHealthData
-}
-
-func NewGlobalNodeStatusController(
-	root client.Client,
-	kosmosClient versioned.Interface,
-) *GlobalNodeStatusController {
-	return &GlobalNodeStatusController{
-		root:           root,
-		statusInterval: DefaultStatusUpdateInterval,
-		kosmosClient:   kosmosClient,
-		nodeHealthMap:  sync.Map{},
-	}
-}
-func (c *GlobalNodeStatusController) Start(ctx context.Context) error {
-	go wait.UntilWithContext(ctx, c.syncGlobalNodeStatus, c.statusInterval)
-
-	<-ctx.Done()
-	return nil
-}
-func (c *GlobalNodeStatusController) syncGlobalNodeStatus(ctx context.Context) {
-	globalNodes := make([]*v1alpha1.GlobalNode, 0)
-
-	nodeList, err := c.kosmosClient.KosmosV1alpha1().GlobalNodes().List(ctx, metav1.ListOptions{})
-	if err != nil {
-		klog.Errorf("Failed to fetch GlobalNodes: %v", err)
-		return
-	}
-	for _, node := range nodeList.Items {
-		nodeCopy := node.DeepCopy()
-		globalNodes = append(globalNodes, nodeCopy)
-	}
-
-	err = c.updateGlobalNodeStatus(ctx, globalNodes)
-	if err != nil {
-		klog.Errorf("Failed to sync global node status: %v", err)
-	}
-}
-
-func (c *GlobalNodeStatusController) updateGlobalNodeStatus(
-	ctx context.Context,
-	globalNodes []*v1alpha1.GlobalNode,
-) error {
-	errChan := make(chan error, len(globalNodes))
-
-	workqueue.ParallelizeUntil(ctx, nodeUpdateWorkerSize, len(globalNodes), func(piece int) {
-		node := globalNodes[piece]
-		if err := c.updateStatusForGlobalNode(ctx, node); err != nil {
-			klog.Errorf("Failed to update status for global node %s: %v", node.Name, err)
-			errChan <- err
-		}
-	})
-
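// For context: the error collection above is a common client-go pattern: the
// channel is buffered to the size of the work list, ParallelizeUntil blocks
// until every worker returns, so the channel can then be closed and drained,
// keeping the last error seen. A condensed sketch of the same pattern (items
// and process are hypothetical placeholders, not part of the deleted code):
//
//	errCh := make(chan error, len(items))
//	workqueue.ParallelizeUntil(ctx, nodeUpdateWorkerSize, len(items), func(i int) {
//		if err := process(items[i]); err != nil {
//			errCh <- err // buffered, never blocks: at most one error per item
//		}
//	})
//	close(errCh)
//	var retErr error
//	for err := range errCh {
//		retErr = err // last error wins
//	}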
close(errChan) - - var retErr error - for err := range errChan { - retErr = err - } - return retErr -} - -func (c *GlobalNodeStatusController) updateStatusForGlobalNode( - ctx context.Context, - globalNode *v1alpha1.GlobalNode, -) error { - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - currentNode, err := c.kosmosClient.KosmosV1alpha1().GlobalNodes().Get(ctx, globalNode.Name, metav1.GetOptions{}) - if err != nil { - klog.Errorf("Failed to fetch the latest GlobalNode %s: %v", globalNode.Name, err) - return err - } - - if len(currentNode.Status.Conditions) == 0 { - klog.Warningf("GlobalNode %s has no conditions, skipping status update", currentNode.Name) - return nil - } - - condition := currentNode.Status.Conditions[0] - lastHeartbeatTime := condition.LastHeartbeatTime - timeDiff := time.Since(lastHeartbeatTime.Time) - - statusType := NodeReady - if timeDiff > ClientHeartbeatThreshold { - statusType = NodeNotReady - } - - dataRaw, _ := c.nodeHealthMap.LoadOrStore(globalNode.Name, &nodeHealthData{}) - nh := dataRaw.(*nodeHealthData) - - if statusType == NodeNotReady { - nh.notReadyCount++ - if condition.Type == NodeReady { - klog.V(2).Infof("GlobalNode %s: notReadyCount=%d, newStatus=%s", globalNode.Name, nh.notReadyCount, statusType) - } - } else { - nh.notReadyCount = 0 - } - - if nh.notReadyCount > 0 && nh.notReadyCount < RequiredNotReadyCount { - c.nodeHealthMap.Store(globalNode.Name, nh) - return nil - } - - if condition.Type != statusType { - condition.Type = statusType - condition.LastTransitionTime = metav1.NewTime(time.Now()) - - currentNode.Status.Conditions[0] = condition - - _, err = c.kosmosClient.KosmosV1alpha1().GlobalNodes().UpdateStatus(ctx, currentNode, metav1.UpdateOptions{}) - if err != nil { - if errors.IsConflict(err) { - klog.Warningf("Conflict detected while updating status for GlobalNode %s, retrying...", globalNode.Name) - } else { - klog.Errorf("Failed to update status for GlobalNode %s: %v", globalNode.Name, err) - } - return err - } - - klog.Infof("Successfully updated status for GlobalNode %s to %s", globalNode.Name, statusType) - nh.notReadyCount = 0 - c.nodeHealthMap.Store(globalNode.Name, nh) - } - return nil - }) -} diff --git a/pkg/kubenest/controller/global.node.controller/global_node_lifecycle_controller_test.go b/pkg/kubenest/controller/global.node.controller/global_node_lifecycle_controller_test.go deleted file mode 100644 index 24533f629..000000000 --- a/pkg/kubenest/controller/global.node.controller/global_node_lifecycle_controller_test.go +++ /dev/null @@ -1,288 +0,0 @@ -package globalnodecontroller - -import ( - "context" - "fmt" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - kosmosfake "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned/fake" -) - -// nolint -func TestUpdateStatusForGlobalNode(t *testing.T) { - tests := []struct { - name string - initialNode *v1alpha1.GlobalNode - nodeList []*v1alpha1.GlobalNode - expectedStatus string - }{ - { - name: "No condition to update", - initialNode: &v1alpha1.GlobalNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - }, - }, - }, - }, - 
nodeList: []*v1alpha1.GlobalNode{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - }, - }, - }, - }, - }, - expectedStatus: "Ready", - }, - { - name: "Status update required", - initialNode: &v1alpha1.GlobalNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node2", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)), - }, - }, - }, - }, - nodeList: []*v1alpha1.GlobalNode{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node2", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)), - }, - }, - }, - }, - }, - expectedStatus: "NotReady", - }, - { - name: "No nodes in list", - initialNode: &v1alpha1.GlobalNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node3", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-30 * time.Minute)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-30 * time.Minute)), - }, - }, - }, - }, - nodeList: []*v1alpha1.GlobalNode{}, - expectedStatus: "", - }, - { - name: "Node Ready status recently updated", - initialNode: &v1alpha1.GlobalNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node4", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-5 * time.Minute)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-5 * time.Minute)), - }, - }, - }, - }, - nodeList: []*v1alpha1.GlobalNode{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node4", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-5 * time.Minute)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-5 * time.Minute)), - }, - }, - }, - }, - }, - expectedStatus: "Ready", - }, - { - name: "Node status changed from Ready to NotReady", - initialNode: &v1alpha1.GlobalNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node5", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - }, - }, - }, - }, - nodeList: []*v1alpha1.GlobalNode{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node5", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-3 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-3 * time.Hour)), - }, - }, - }, - }, - }, - expectedStatus: "NotReady", - }, - { - name: "Node added to list but with no conditions", - initialNode: &v1alpha1.GlobalNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node6", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-30 * time.Minute)), - LastTransitionTime: 
metav1.NewTime(time.Now().Add(-30 * time.Minute)), - }, - }, - }, - }, - nodeList: []*v1alpha1.GlobalNode{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node6", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{}, - }, - }, - }, - expectedStatus: "NotReady", - }, - { - name: "Multiple nodes with mixed statuses", - initialNode: &v1alpha1.GlobalNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node7", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-1 * time.Hour)), - }, - }, - }, - }, - nodeList: []*v1alpha1.GlobalNode{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node7", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)), - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node8", - }, - Status: v1alpha1.GlobalNodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - LastHeartbeatTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), - LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), - }, - }, - }, - }, - }, - expectedStatus: "NotReady", - }, - } - - ctx := context.TODO() - - scheme := runtime.NewScheme() - _ = v1alpha1.AddToScheme(scheme) - - rootClient := fake.NewClientBuilder().WithScheme(scheme).Build() - kosmosclient := kosmosfake.NewSimpleClientset() - - controller := NewGlobalNodeStatusController( - rootClient, - kosmosclient, - ) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fmt.Println(tt.initialNode.Name + "success") - for _, node := range tt.nodeList { - _, err := kosmosclient.KosmosV1alpha1().GlobalNodes().Create(ctx, node, metav1.CreateOptions{}) - if err != nil { - return - } - } - err := controller.updateStatusForGlobalNode(ctx, tt.initialNode) - if err != nil { - return - } - fmt.Println(string(tt.initialNode.Status.Conditions[0].Type) == tt.expectedStatus) - }) - } -} diff --git a/pkg/kubenest/controller/kosmos/kosmos_join_controller.go b/pkg/kubenest/controller/kosmos/kosmos_join_controller.go deleted file mode 100644 index 1fe9c2e91..000000000 --- a/pkg/kubenest/controller/kosmos/kosmos_join_controller.go +++ /dev/null @@ -1,606 +0,0 @@ -package controller - -import ( - "context" - "encoding/base64" - "fmt" - "os" - "reflect" - "sync" - - corev1 "k8s.io/api/core/v1" - extensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/cert" - clusterManager "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager" - 
"github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" - file "github.com/kosmos.io/kosmos/pkg/kosmosctl/manifest" - kosmosctl "github.com/kosmos.io/kosmos/pkg/kosmosctl/util" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - manifest "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/kosmos" - "github.com/kosmos.io/kosmos/pkg/utils" - "github.com/kosmos.io/kosmos/pkg/version" -) - -type KosmosJoinController struct { - client.Client - EventRecorder record.EventRecorder - KubeConfig *restclient.Config - KubeconfigStream []byte - AllowNodeOwnbyMulticluster bool -} - -var nodeOwnerMap = make(map[string]string) -var mu sync.Mutex -var once sync.Once - -func (c *KosmosJoinController) RemoveClusterFinalizer(cluster *v1alpha1.Cluster, kosmosClient versioned.Interface) error { - for _, finalizer := range []string{utils.ClusterStartControllerFinalizer, clusterManager.ControllerFinalizerName} { - if controllerutil.ContainsFinalizer(cluster, finalizer) { - controllerutil.RemoveFinalizer(cluster, finalizer) - } - } - klog.Infof("remove finalizer for cluster %s", cluster.Name) - - _, err := kosmosClient.KosmosV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{}) - if err != nil { - klog.Errorf("cluster %s failed remove finalizer: %v", cluster.Name, err) - return err - } - klog.Infof("update cluster after remove finalizer for cluster %s", cluster.Name) - return nil -} - -func (c *KosmosJoinController) InitNodeOwnerMap() { - vcList := &v1alpha1.VirtualClusterList{} - err := c.List(context.Background(), vcList) - if err != nil { - klog.Errorf("list virtual cluster error: %v", err) - return - } - for _, vc := range vcList.Items { - if vc.Status.Phase == v1alpha1.Completed { - kubeconfigStream, err := base64.StdEncoding.DecodeString(vc.Spec.Kubeconfig) - if err != nil { - klog.Errorf("virtualcluster %s decode target kubernetes kubeconfig %s err: %v", vc.Name, vc.Spec.Kubeconfig, err) - continue - } - kosmosClient, _, k8sExtensionsClient, err := c.InitTargetKubeclient(kubeconfigStream) - if err != nil { - klog.Errorf("virtualcluster %s crd kubernetes client failed: %v", vc.Name, err) - continue - } - _, err = k8sExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "clusters.kosmos.io", metav1.GetOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - klog.Errorf("virtualcluster %s get crd clusters.kosmos.io err: %v", vc.Name, err) - } - klog.Infof("virtualcluster %s crd clusters.kosmos.io doesn't exist", vc.Name) - continue - } - clusters, err := kosmosClient.KosmosV1alpha1().Clusters().List(context.Background(), metav1.ListOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - klog.Infof("virtualcluster %s get clusters err: %v", vc.Name, err) - } - klog.Infof("virtualcluster %s cluster doesn't exist", vc.Name) - continue - } - mu.Lock() - for _, cluster := range clusters.Items { - for _, node := range cluster.Spec.ClusterTreeOptions.LeafModels { - if vcName, ok := nodeOwnerMap[node.LeafNodeName]; ok && len(vcName) > 0 { - klog.Warningf("node %s also belong to cluster %s", node.LeafNodeName, vcName) - } - nodeOwnerMap[node.LeafNodeName] = vc.Name - } - } - mu.Unlock() - } - klog.Infof("check virtualcluster %s, nodeOwnerMap is %v", vc.Name, nodeOwnerMap) - } - klog.Infof("Init nodeOwnerMap is %v", nodeOwnerMap) -} - -func (c *KosmosJoinController) UninstallClusterTree(ctx context.Context, request reconcile.Request, vc *v1alpha1.VirtualCluster) error { - klog.Infof("Start deleting kosmos-clustertree deployment 
%s/%s-clustertree-cluster-manager...", request.Namespace, request.Name)
-	clustertreeDeploy, err := kosmosctl.GenerateDeployment(manifest.ClusterTreeClusterManagerDeployment, manifest.DeploymentReplace{
-		Namespace:       request.Namespace,
-		ImageRepository: "null",
-		Version:         "null",
-		Name:            request.Name,
-		FilePath:        constants.DefaultKubeconfigPath,
-	})
-	if err != nil {
-		return err
-	}
-
-	deleteRequest := types.NamespacedName{
-		Namespace: request.Namespace,
-		Name:      clustertreeDeploy.Name,
-	}
-	err = c.Get(ctx, deleteRequest, clustertreeDeploy)
-	if err != nil {
-		if !apierrors.IsNotFound(err) {
-			return fmt.Errorf("get clustertree deployment %s-clustertree-cluster-manager error, deployment delete failed: %v",
-				request.Name, err)
-		}
-		klog.Infof("clustertree deployment %s-clustertree-cluster-manager doesn't exist", request.Name)
-	} else {
-		err := c.Delete(ctx, clustertreeDeploy)
-		if err != nil && !apierrors.IsNotFound(err) {
-			return fmt.Errorf("delete kosmos-clustertree deployment %s-clustertree-cluster-manager error: %v",
-				request.Name, err)
-		}
-	}
-
-	klog.Infof("Deployment %s/%s-clustertree-cluster-manager has been deleted.", request.Namespace, request.Name)
-
-	klog.Infof("Start deleting kosmos-clustertree secret %s/%s-clustertree-cluster-manager", request.Namespace, request.Name)
-	clustertreeSecret, err := kosmosctl.GenerateSecret(manifest.ClusterTreeClusterManagerSecret, manifest.SecretReplace{
-		Namespace: request.Namespace,
-		Cert:      cert.GetCrtEncode(),
-		Key:       cert.GetKeyEncode(),
-		Name:      request.Name,
-	})
-	if err != nil {
-		return err
-	}
-	deleteRequest.Name = clustertreeSecret.Name
-	err = c.Get(ctx, deleteRequest, clustertreeSecret)
-	if err != nil {
-		if !apierrors.IsNotFound(err) {
-			return fmt.Errorf("get clustertree secret error, secret %s/%s-clustertree-cluster-manager delete failed: %v",
-				request.Namespace, request.Name, err)
-		}
-		klog.Infof("clustertree secret doesn't exist")
-	} else {
-		err := c.Delete(ctx, clustertreeSecret)
-		if err != nil && !apierrors.IsNotFound(err) {
-			return fmt.Errorf("delete kosmos-clustertree secret %s/%s-clustertree-cluster-manager error: %v", request.Namespace, request.Name, err)
-		}
-	}
-	klog.Infof("Secret %s/%s-clustertree-cluster-manager has been deleted.
", request.Namespace, request.Name) - - clusterName := fmt.Sprintf("virtualcluster-%s-%s", request.Namespace, request.Name) - klog.Infof("Attempting to delete cluster %s...", clusterName) - - kubeconfigStream, err := base64.StdEncoding.DecodeString(vc.Spec.Kubeconfig) - if err != nil { - return fmt.Errorf("decode target kubernetes kubeconfig %s err: %v", vc.Spec.Kubeconfig, err) - } - kosmosClient, _, _, err := c.InitTargetKubeclient(kubeconfigStream) - if err != nil { - return fmt.Errorf("create kubernetes client failed: %v", err) - } - - old, err := kosmosClient.KosmosV1alpha1().Clusters().Get(context.TODO(), - clusterName, metav1.GetOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("get cluster %s failed when we try to del: %v", clusterName, err) - } - } else { - if !c.AllowNodeOwnbyMulticluster { - mu.Lock() - for _, nodeName := range old.Spec.ClusterTreeOptions.LeafModels { - nodeOwnerMap[nodeName.LeafNodeName] = "" - } - mu.Unlock() - } - err = c.RemoveClusterFinalizer(old, kosmosClient) - if err != nil { - return fmt.Errorf("removefinalizer %s failed: %v", clusterName, err) - } - err = kosmosClient.KosmosV1alpha1().Clusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) - if err != nil { - return fmt.Errorf("delete cluster %s failed: %v", clusterName, err) - } - } - klog.Infof("Cluster %s has been deleted.", clusterName) - if err := c.RemoveFinalizer(ctx, vc); err != nil { - return fmt.Errorf("remove finalizer error: %v", err) - } - return nil -} - -func (c *KosmosJoinController) InitTargetKubeclient(kubeconfigStream []byte) (versioned.Interface, kubernetes.Interface, extensionsclient.Interface, error) { - //targetKubeconfig := path.Join(DefaultKubeconfigPath, "kubeconfig") - //config, err := utils.RestConfig(targetKubeconfig, "") - config, err := utils.NewConfigFromBytes(kubeconfigStream) - if err != nil { - return nil, nil, nil, fmt.Errorf("generate kubernetes config failed: %s", err) - } - - kosmosClient, err := versioned.NewForConfig(config) - if err != nil { - return nil, nil, nil, fmt.Errorf("generate Kosmos client failed: %v", err) - } - - k8sClient, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, nil, nil, fmt.Errorf("generate K8s basic client failed: %v", err) - } - - k8sExtensionsClient, err := extensionsclient.NewForConfig(config) - if err != nil { - return nil, nil, nil, fmt.Errorf("generate K8s extensions client failed: %v", err) - } - - return kosmosClient, k8sClient, k8sExtensionsClient, nil -} - -func (c *KosmosJoinController) DeployKosmos(ctx context.Context, request reconcile.Request, vc *v1alpha1.VirtualCluster) error { - klog.Infof("Start creating kosmos-clustertree secret %s/%s-clustertree-cluster-manager", request.Namespace, request.Name) - clustertreeSecret, err := kosmosctl.GenerateSecret(manifest.ClusterTreeClusterManagerSecret, manifest.SecretReplace{ - Namespace: request.Namespace, - Cert: cert.GetCrtEncode(), - Key: cert.GetKeyEncode(), - Kubeconfig: vc.Spec.Kubeconfig, - Name: request.Name, - }) - if err != nil { - return err - } - err = c.Create(ctx, clustertreeSecret) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return fmt.Errorf("install clustertree error, secret %s/%s-clustertree-cluster-manager created failed: %v", - request.Namespace, request.Name, err) - } - } - klog.Infof("Secret %s/%s-clustertree-cluster-manager has been created. 
", request.Namespace, request.Name) - - klog.Infof("Start creating kosmos-clustertree deployment %s/%s-clustertree-cluster-manager...", request.Namespace, request.Name) - imageRepository := os.Getenv(constants.DefaultImageRepositoryEnv) - if len(imageRepository) == 0 { - imageRepository = utils.DefaultImageRepository - } - - //TODO: hard coded,modify in future - imageVersion := "v0.3.0" //os.Getenv(constants.DefauleImageVersionEnv) - if len(imageVersion) == 0 { - imageVersion = fmt.Sprintf("v%s", version.GetReleaseVersion().PatchRelease()) - } - clustertreeDeploy, err := kosmosctl.GenerateDeployment(manifest.ClusterTreeClusterManagerDeployment, manifest.DeploymentReplace{ - Namespace: request.Namespace, - ImageRepository: imageRepository, - Version: imageVersion, - FilePath: constants.DefaultKubeconfigPath, - Name: request.Name, - }) - if err != nil { - return err - } - err = c.Create(ctx, clustertreeDeploy) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return fmt.Errorf("install clustertree error, deployment %s/%s-clustertree-cluster-manager created failed: %v", - request.Namespace, request.Name, err) - } - } - klog.Infof("Deployment %s/%s-clustertree-cluster-manager has been created. ", request.Namespace, request.Name) - return nil -} - -func (c *KosmosJoinController) ClearSomeNodeOwner(nodeNames *[]string) { - if !c.AllowNodeOwnbyMulticluster { - mu.Lock() - for _, nodeName := range *nodeNames { - nodeOwnerMap[nodeName] = "" - } - mu.Unlock() - } -} - -func (c *KosmosJoinController) CreateClusterObject(_ context.Context, _ reconcile.Request, - vc *v1alpha1.VirtualCluster, hostK8sClient kubernetes.Interface, cluster *v1alpha1.Cluster) (*[]string, *map[string]struct{}, error) { - var leafModels []v1alpha1.LeafModel - // recored new nodes' name, if error happen before create or update, need clear newNodeNames - newNodeNames := []string{} - // record all nodes' name in a map, when update cr, may need to delete some old node - // compare all nodes in cluster cr to all node exits in virtual cluster,we can find which ndoe should be deleted - allNodeNamesMap := map[string]struct{}{} - - for _, nodeInfo := range vc.Spec.PromoteResources.NodeInfos { - _, err := hostK8sClient.CoreV1().Nodes().Get(context.Background(), nodeInfo.NodeName, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.Warningf("node %s doesn't exits: %v", nodeInfo.NodeName, err) - continue - } - c.ClearSomeNodeOwner(&newNodeNames) - klog.Errorf("get node %s error: %v", nodeInfo.NodeName, err) - return nil, nil, err - } - if !c.AllowNodeOwnbyMulticluster { - mu.Lock() - if len(nodeOwnerMap) > 0 { - if nodeOwner, existed := nodeOwnerMap[nodeInfo.NodeName]; existed && len(nodeOwner) > 0 { - if nodeOwner != cluster.Name { - continue - } - } else { - newNodeNames = append(newNodeNames, nodeInfo.NodeName) - } - } else { - newNodeNames = append(newNodeNames, nodeInfo.NodeName) - } - allNodeNamesMap[nodeInfo.NodeName] = struct{}{} - nodeOwnerMap[nodeInfo.NodeName] = cluster.Name - mu.Unlock() - } - leafModel := v1alpha1.LeafModel{ - LeafNodeName: nodeInfo.NodeName, - Taints: []corev1.Taint{ - { - Effect: utils.KosmosNodeTaintEffect, - Key: utils.KosmosNodeTaintKey, - Value: utils.KosmosNodeValue, - }, - }, - NodeSelector: v1alpha1.NodeSelector{ - NodeName: nodeInfo.NodeName, - }, - } - leafModels = append(leafModels, leafModel) - } - klog.V(7).Infof("all new node in cluster %s: %v", cluster.Name, newNodeNames) - klog.V(7).Infof("all node in cluster %s: %v", cluster.Name, allNodeNamesMap) - 
-	cluster.Spec.ClusterTreeOptions.LeafModels = leafModels
-
-	return &newNodeNames, &allNodeNamesMap, nil
-}
-
-func (c *KosmosJoinController) CreateOrUpdateCluster(_ context.Context, request reconcile.Request,
-	kosmosClient versioned.Interface, k8sClient kubernetes.Interface, newNodeNames *[]string,
-	allNodeNamesMap *map[string]struct{}, cluster *v1alpha1.Cluster) error {
-	old, err := kosmosClient.KosmosV1alpha1().Clusters().Get(context.TODO(), cluster.Name, metav1.GetOptions{})
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			_, err = kosmosClient.KosmosV1alpha1().Clusters().Create(context.TODO(), cluster, metav1.CreateOptions{})
-			if err != nil {
-				c.ClearSomeNodeOwner(newNodeNames)
-				return fmt.Errorf("create cluster %s failed: %v", cluster.Name, err)
-			}
-		} else {
-			c.ClearSomeNodeOwner(newNodeNames)
-			return fmt.Errorf("create cluster %s failed when getting it first: %v", cluster.Name, err)
-		}
-		klog.Infof("Cluster %s for %s/%s has been created.", cluster.Name, request.Namespace, request.Name)
-	} else {
-		cluster.ResourceVersion = old.GetResourceVersion()
-		_, err = kosmosClient.KosmosV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{})
-		if err != nil {
-			c.ClearSomeNodeOwner(newNodeNames)
-			return fmt.Errorf("update cluster %s failed: %v", cluster.Name, err)
-		}
-
-		k8sNodesList, err := k8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-		if err != nil {
-			return fmt.Errorf("list %s's k8s nodes error: %v", cluster.Name, err)
-		}
-		// clean up nodes: delete nodes no longer listed in the new VirtualCluster.spec.PromoteResources.Nodes
-		for _, node := range k8sNodesList.Items {
-			if _, ok := (*allNodeNamesMap)[node.Name]; !ok {
-				// if an existing node is not in the map, it should be deleted
-				err := k8sClient.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{})
-				if err != nil && !apierrors.IsNotFound(err) {
-					return fmt.Errorf("delete %s's k8s nodes error: %v", cluster.Name, err)
-				}
-				// clear the node's owner
-				mu.Lock()
-				nodeOwnerMap[node.Name] = ""
-				mu.Unlock()
-			}
-		}
-		klog.Infof("Cluster %s for %s/%s has been updated.", cluster.Name, request.Namespace, request.Name)
-	}
-	return nil
-}
-
-func (c *KosmosJoinController) CreateCluster(ctx context.Context, request reconcile.Request, vc *v1alpha1.VirtualCluster) error {
-	kubeconfigStream, err := base64.StdEncoding.DecodeString(vc.Spec.Kubeconfig)
-	if err != nil {
-		return fmt.Errorf("decode target kubernetes kubeconfig %s err: %v", vc.Spec.Kubeconfig, err)
-	}
-	kosmosClient, k8sClient, k8sExtensionsClient, err := c.InitTargetKubeclient(kubeconfigStream)
-	if err != nil {
-		return fmt.Errorf("crd kubernetes client failed: %v", err)
-	}
-
-	// create the clusters.kosmos.io CRDs
-	klog.Infof("Attempting to create kosmos-clustertree CRDs for virtualcluster %s/%s...", request.Namespace, request.Name)
-	for _, crdToCreate := range []string{file.ServiceImport, file.Cluster,
-		file.ServiceExport, file.ClusterPodConvert, file.PodConvert} {
-		crdObject, err := kosmosctl.GenerateCustomResourceDefinition(crdToCreate, nil)
-		if err != nil {
-			return err
-		}
-		_, err = k8sExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.Background(), crdObject, metav1.CreateOptions{})
-		if err != nil {
-			if !apierrors.IsAlreadyExists(err) {
-				return fmt.Errorf("create CRD %s for virtualcluster %s/%s failed: %v",
-					crdObject.Name, request.Namespace, request.Name, err)
-			}
-			klog.Warningf("CRD %s already exists, skipping creation", crdObject.Name)
-		} else {
-			klog.Infof("Create CRD %s for
virtualcluster %s/%s successful.", crdObject.Name, request.Namespace, request.Name) - } - } - - // construct cluster.kosmos.io cr - clusterName := fmt.Sprintf("virtualcluster-%s-%s", request.Namespace, request.Name) - klog.Infof("Attempting to create cluster %s for %s/%s ...", clusterName, request.Namespace, request.Name) - - cluster := v1alpha1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - }, - Spec: v1alpha1.ClusterSpec{ - Kubeconfig: c.KubeconfigStream, - Namespace: request.Namespace, - ClusterLinkOptions: &v1alpha1.ClusterLinkOptions{ - Enable: false, - NetworkType: v1alpha1.NetWorkTypeGateWay, - IPFamily: v1alpha1.IPFamilyTypeALL, - }, - ClusterTreeOptions: &v1alpha1.ClusterTreeOptions{ - Enable: true, - }, - }, - } - - hostK8sClient, err := utils.NewClientFromBytes(c.KubeconfigStream) - if err != nil { - return fmt.Errorf("crd kubernetes client failed: %v", err) - } - - newNodeNames, allNodeNamesMap, err := c.CreateClusterObject(ctx, request, vc, hostK8sClient, &cluster) - if err != nil { - return err - } - - // use client-go to create or update cluster.kosmos.io cr - err = c.CreateOrUpdateCluster(ctx, request, kosmosClient, k8sClient, newNodeNames, allNodeNamesMap, &cluster) - if err != nil { - return err - } - - return nil -} - -func (c *KosmosJoinController) AddFinalizer(ctx context.Context, vc *v1alpha1.VirtualCluster) error { - vcNew := vc.DeepCopy() - if controllerutil.AddFinalizer(vcNew, constants.VirtualClusterFinalizerName) { - err := c.Update(ctx, vcNew) - if err != nil { - return fmt.Errorf("add finalizer error for virtualcluster %s: %v", vc.Name, err) - } - } - klog.Infof("add finalizer for virtualcluster %s", vc.Name) - return nil -} - -func (c *KosmosJoinController) RemoveFinalizer(ctx context.Context, vc *v1alpha1.VirtualCluster) error { - vcNew := vc.DeepCopy() - if controllerutil.ContainsFinalizer(vcNew, constants.VirtualClusterFinalizerName) { - controllerutil.RemoveFinalizer(vcNew, constants.VirtualClusterFinalizerName) - err := c.Update(ctx, vcNew) - if err != nil { - return fmt.Errorf("remove finalizer error for virtualcluster %s: %v", vc.Name, err) - } - } - klog.Infof("remove finalizer for virtualcluster %s", vc.Name) - return nil -} - -func (c *KosmosJoinController) InstallClusterTree(ctx context.Context, request reconcile.Request, vc *v1alpha1.VirtualCluster) error { - klog.Infof("Start creating kosmos-clustertree in namespace %s", request.Namespace) - defer klog.Infof("Finish creating kosmos-clustertree in namespace %s", request.Namespace) - if err := c.DeployKosmos(ctx, request, vc); err != nil { - return fmt.Errorf("deploy kosmos error: %v", err) - } - if err := c.CreateCluster(ctx, request, vc); err != nil { - return fmt.Errorf("create cluster error: %v", err) - } - if err := c.AddFinalizer(ctx, vc); err != nil { - return fmt.Errorf("add finalizer error: %v", err) - } - return nil -} - -func (c *KosmosJoinController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(4).Infof("============ %s starts to reconcile %s ============", constants.KosmosJoinControllerName, request.Name) - defer klog.V(4).Infof("============ %s reconcile finish %s ============", constants.KosmosJoinControllerName, request.Name) - if !c.AllowNodeOwnbyMulticluster { - once.Do(c.InitNodeOwnerMap) - } - var vc v1alpha1.VirtualCluster - - if err := c.Get(ctx, request.NamespacedName, &vc); err != nil { - if apierrors.IsNotFound(err) { - return reconcile.Result{}, nil - } - klog.Errorf("get %s error: %v", 
request.NamespacedName, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - if vc.DeletionTimestamp.IsZero() { - if vc.Status.Phase != v1alpha1.Completed { - klog.Infof("cluster's status is %s, skip", vc.Status.Phase) - return reconcile.Result{}, nil - } - err := c.InstallClusterTree(ctx, request, &vc) - if err != nil { - klog.Errorf("install %s error: %v", request.NamespacedName, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - } else { - err := c.UninstallClusterTree(ctx, request, &vc) - if err != nil { - klog.Errorf("uninstall %s error: %v", request.NamespacedName, err) - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - } - - return reconcile.Result{}, nil -} - -func (c *KosmosJoinController) SetupWithManager(mgr manager.Manager) error { - if c.Client == nil { - c.Client = mgr.GetClient() - } - - skipFunc := func(obj client.Object) bool { - // skip reservedNS - return obj.GetNamespace() != utils.ReservedNS - } - - return ctrl.NewControllerManagedBy(mgr). - Named(constants.KosmosJoinControllerName). - WithOptions(controller.Options{}). - For(&v1alpha1.VirtualCluster{}, builder.WithPredicates(predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return skipFunc(createEvent.Object) - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { - if !skipFunc(updateEvent.ObjectNew) { - return true - } - newObj := updateEvent.ObjectNew.(*v1alpha1.VirtualCluster) - oldObj := updateEvent.ObjectOld.(*v1alpha1.VirtualCluster) - - if !newObj.DeletionTimestamp.IsZero() { - return true - } - - return !reflect.DeepEqual(newObj.Spec, oldObj.Spec) || - newObj.Status.Phase != oldObj.Status.Phase - }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { - return skipFunc(deleteEvent.Object) - }, - GenericFunc: func(genericEvent event.GenericEvent) bool { - // TODO - return false - }, - })). 
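// For context: the update predicate above only lets an event through when
// something actionable changed: the VirtualCluster entered deletion, its spec
// differs, or its status phase moved. The decision can be read as a single
// boolean (condensed, illustrative form, not part of the deleted code):
//
//	func needsReconcile(oldVC, newVC *v1alpha1.VirtualCluster) bool {
//		if !newVC.DeletionTimestamp.IsZero() {
//			return true // deletion started
//		}
//		return !reflect.DeepEqual(newVC.Spec, oldVC.Spec) ||
//			newVC.Status.Phase != oldVC.Status.Phase
//	}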
- Complete(c) -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/env/env.go b/pkg/kubenest/controller/virtualcluster.node.controller/env/env.go deleted file mode 100644 index 10d3046d9..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/env/env.go +++ /dev/null @@ -1,158 +0,0 @@ -package util - -import ( - "encoding/base64" - "fmt" - "os" - "strconv" - "strings" - - "k8s.io/klog" -) - -func GetExectorTmpPath() string { - tmpPath := os.Getenv("EXECTOR_TMP_PATH") - if len(tmpPath) == 0 { - tmpPath = "/apps/conf/kosmos/tmp" - } - return tmpPath -} - -func GetKubeletKubeConfigName() string { - kubeletKubeConfigName := os.Getenv("KUBELET_KUBE_CONFIG_NAME") - if len(kubeletKubeConfigName) == 0 { - // env.sh KUBELET_KUBE_CONFIG_NAME - kubeletKubeConfigName = "kubelet.conf" - } - return kubeletKubeConfigName -} - -func GetKubeletConfigName() string { - kubeletConfigName := os.Getenv("KUBELET_CONFIG_NAME") - if len(kubeletConfigName) == 0 { - // env.sh KUBELET_CONFIG_NAME - kubeletConfigName = "config.yaml" - } - return kubeletConfigName -} - -func GetExectorWorkerDir() string { - exectorWorkDir := os.Getenv("EXECTOR_WORKER_PATH") - if len(exectorWorkDir) == 0 { - exectorWorkDir = "/etc/vc-node-dir/" - } - return exectorWorkDir -} - -func GetExectorShellName() string { - shellName := os.Getenv("EXECTOR_SHELL_NAME") - - if len(shellName) == 0 { - shellName = "kubelet_node_helper.sh" - } - return shellName -} - -func GetExectorShellEnvName() string { - shellName := os.Getenv("EXECTOR_SHELL_ENV_NAME") - - if len(shellName) == 0 { - shellName = "env.sh" - } - return shellName -} - -func GetExectorShellPath() string { - exectorWorkDir := GetExectorWorkerDir() - shellVersion := GetExectorShellName() - - return fmt.Sprintf("%s%s", exectorWorkDir, shellVersion) -} - -func GetExectorShellEnvPath() string { - exectorWorkDir := GetExectorWorkerDir() - shellVersion := GetExectorShellEnvName() - - return fmt.Sprintf("%s%s", exectorWorkDir, shellVersion) -} - -func GetExectorHostMasterNodeIP() string { - hostIP := os.Getenv("EXECTOR_HOST_MASTER_NODE_IP") - if len(hostIP) == 0 { - klog.Fatal("EXECTOR_HOST_MASTER_NODE_IP is none") - } - return hostIP -} - -// tobke = base64(`username:password`) -func GetExectorToken() string { - username := os.Getenv("WEB_USER") - password := os.Getenv("WEB_PASS") - token := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) - if len(token) == 0 { - klog.Fatal("EXECTOR_SHELL_TOKEN is none") - } - return token -} - -func GetExectorPort() string { - exectorPort := os.Getenv("EXECTOR_SERVER_PORT") - if len(exectorPort) == 0 { - exectorPort = "5678" - } - return exectorPort -} - -func GetDrainWaitSeconds() int { - drainWaitSeconds := os.Getenv("EXECTOR_DRAIN_WAIT_SECONDS") - if len(drainWaitSeconds) == 0 { - drainWaitSeconds = "60" - } - num, err := strconv.Atoi(drainWaitSeconds) - - if err != nil { - klog.Fatalf("convert EXECTOR_DRAIN_WAIT_SECONDS failed, err: %s", err) - } - - return num -} - -func GetControlPlaneLabel() string { - controllPlaneLabel := os.Getenv("CONTROL_PLANE_LABEL") - if len(controllPlaneLabel) == 0 { - controllPlaneLabel = "node-role.kubernetes.io/control-plane" - } - return controllPlaneLabel -} - -func GetWaitNodeReadTime() int { - readTimeSeconds := os.Getenv("WAIT_NODE_READ_TIME") - if len(readTimeSeconds) == 0 { - readTimeSeconds = "30" - } - num, err := strconv.Atoi(readTimeSeconds) - if err != nil { - klog.Fatalf("convert WAIT_NODE_READ_TIME failed, err: %s", err) - } - return num -} 
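// For context: every numeric helper in the env file above repeats the same
// read-env-or-default shape, failing fast on a bad parse. A generic version,
// shown only to illustrate the pattern (getIntEnv is not part of the deleted
// code):
//
//	func getIntEnv(key string, def string) int {
//		raw := os.Getenv(key)
//		if len(raw) == 0 {
//			raw = def // fall back to the documented default
//		}
//		num, err := strconv.Atoi(raw)
//		if err != nil {
//			klog.Fatalf("convert %s failed, err: %s", key, err)
//		}
//		return num
//	}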
- -func GetNodeTaskMaxGoroutines() int { - maxGoroutines := os.Getenv("NODE_TASK_MAX_GOROUTINES") - if len(maxGoroutines) == 0 { - maxGoroutines = "10" - } - num, err := strconv.Atoi(maxGoroutines) - if err != nil { - klog.Fatalf("convert NODE_TASK_MAX_GOROUTINES failed, err: %s", err) - } - return num -} - -func GetCMDPaths() []string { - cmdAbsolutePaths := os.Getenv("CMD_ABSOLUTE_PATHS") - if len(cmdAbsolutePaths) == 0 { - return nil - } - return strings.Split(cmdAbsolutePaths, ",") -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go deleted file mode 100644 index 3df3767e8..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go +++ /dev/null @@ -1,186 +0,0 @@ -package exector - -import ( - "crypto/tls" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/gorilla/websocket" - "k8s.io/klog/v2" - - env "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/env" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -type Status int - -const ( - SUCCESS Status = iota - FAILED -) - -const ( - NotFoundText = "127" -) - -// nolint:revive -type ExectorReturn struct { - Status Status - Reason string - LastLog string - Text string - Code int -} - -func (r *ExectorReturn) String() string { - return fmt.Sprintf("%d, %s, %s, %d", r.Status, r.Reason, r.LastLog, r.Code) -} - -// nolint:revive -type Exector interface { - GetWebSocketOption() WebSocketOption - SendHandler(conn *websocket.Conn, done <-chan struct{}, interrupt chan struct{}, result *ExectorReturn) -} - -// nolint:revive -type ExectorHelper struct { - Token string - Addr string -} - -func (h *ExectorHelper) createWebsocketConnection(opt WebSocketOption) (*websocket.Conn, *http.Response, error) { - u := url.URL{Scheme: "wss", Host: h.Addr, Path: opt.Path, RawQuery: url.PathEscape(opt.RawQuery)} - // nolint - dl := websocket.Dialer{TLSClientConfig: &tls.Config{RootCAs: nil, InsecureSkipVerify: true}} - - return dl.Dial(u.String(), http.Header{ - "Authorization": []string{"Basic " + h.Token}, - }) -} - -type WebSocketOption struct { - Path string - Addr string - RawQuery string -} - -func (h *ExectorHelper) DoExector(stopCh <-chan struct{}, exector Exector) *ExectorReturn { - ret := h.DoExectorReal(stopCh, exector) - if ret.Text == NotFoundText { - // try to update shell script - srcEnvFile := env.GetExectorShellEnvPath() - klog.V(4).Infof("exector: src file path %s", srcEnvFile) - - scpEnvExector := &SCPExector{ - DstFilePath: ".", - DstFileName: env.GetExectorShellEnvName(), - SrcFile: srcEnvFile, - } - - srcShellFile := env.GetExectorShellPath() - klog.V(4).Infof("exector: src file path %s", srcShellFile) - - scpShellExector := &SCPExector{ - DstFilePath: ".", - DstFileName: env.GetExectorShellName(), - SrcFile: srcShellFile, - } - - if ret := h.DoExectorReal(stopCh, scpEnvExector); ret.Status == SUCCESS { - if ret := h.DoExectorReal(stopCh, scpShellExector); ret.Status == SUCCESS { - return h.DoExectorReal(stopCh, exector) - } - } else { - return ret - } - } - return ret -} - -func (h *ExectorHelper) DoExectorReal(stopCh <-chan struct{}, exector Exector) *ExectorReturn { - // default is error - result := &ExectorReturn{ - FAILED, "init exector return status", "", "", 0, - } - - // nolint - conn, _, err := h.createWebsocketConnection(exector.GetWebSocketOption()) - if err != nil { - result.Reason = err.Error() - return result - } - defer conn.Close() - - done := 
make(chan struct{}) - interrupt := make(chan struct{}) - - go exector.SendHandler(conn, done, interrupt, result) - - go func() { - defer close(done) - for { - _, message, err := conn.ReadMessage() - if len(message) > 0 { - klog.V(4).Infof("recv: %s", string(message)) - } - if err != nil { - klog.V(4).Infof("read: %s", err) - cerr, ok := err.(*websocket.CloseError) - if ok { - if cerr.Text == "0" { - result.Status = SUCCESS - result.Reason = "success" - } else if cerr.Text == NotFoundText { - result.Status = FAILED - result.Reason = "command not found" - result.Text = cerr.Text - } - result.Code = cerr.Code - } else { - result.Reason = err.Error() - } - return - } - // klog.V(4).Infof("recv: %s", string(message)) - // last - result.LastLog = result.LastLog + string(message) - } - }() - - for { - select { - case <-stopCh: // finished circulate when stopCh is closed - close(interrupt) - case <-interrupt: - err := conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - if err != nil { - result.Reason = err.Error() - return result - } - select { - case <-done: - case <-time.After(time.Second): - } - return result - case <-done: - return result - } - } -} - -func NewExectorHelper(addr string, port string) *ExectorHelper { - var exectorPort string - if len(port) == 0 { - exectorPort = env.GetExectorPort() - } else { - exectorPort = port - } - - token := env.GetExectorToken() - return &ExectorHelper{ - Token: token, - Addr: utils.GenerateAddrStr(addr, exectorPort), - } -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_check.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_check.go deleted file mode 100644 index 25da806ef..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_check.go +++ /dev/null @@ -1,20 +0,0 @@ -package exector - -import ( - "github.com/gorilla/websocket" -) - -type CheckExector struct { - Port string -} - -func (e *CheckExector) GetWebSocketOption() WebSocketOption { - rawQuery := "port=" + e.Port - return WebSocketOption{ - Path: "check/", - RawQuery: rawQuery, - } -} - -func (e *CheckExector) SendHandler(_ *websocket.Conn, _ <-chan struct{}, _ chan struct{}, _ *ExectorReturn) { -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_cmd.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_cmd.go deleted file mode 100644 index 98a9aed2b..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_cmd.go +++ /dev/null @@ -1,44 +0,0 @@ -package exector - -import ( - "fmt" - "strings" - - "github.com/gorilla/websocket" - - env "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/env" -) - -type CMDExector struct { - Cmd string -} - -func AddPrefix(cmd string) string { - cmdAbsolutePaths := env.GetCMDPaths() - if len(cmdAbsolutePaths) == 0 { - return cmd - } - for _, cmdAbsolutePath := range cmdAbsolutePaths { - if strings.HasSuffix(cmdAbsolutePath, fmt.Sprintf("/%s", cmd)) { - return cmdAbsolutePath - } - } - return cmd -} - -func (e *CMDExector) GetWebSocketOption() WebSocketOption { - cmdArgs := strings.Split(e.Cmd, " ") - command := cmdArgs[0] - rawQuery := "command=" + AddPrefix(command) - if len(cmdArgs) > 1 { - args := cmdArgs[1:] - rawQuery = rawQuery + "&args=" + strings.Join(args, "&args=") - } - return WebSocketOption{ - Path: "cmd/", - RawQuery: rawQuery, - } -} - -func (e *CMDExector) SendHandler(_ *websocket.Conn, _ 
<-chan struct{}, _ chan struct{}, _ *ExectorReturn) { -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_scp.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_scp.go deleted file mode 100644 index 8ace685c7..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_scp.go +++ /dev/null @@ -1,88 +0,0 @@ -package exector - -import ( - "bufio" - "fmt" - "os" - - "github.com/gorilla/websocket" - "k8s.io/klog/v2" -) - -type SCPExector struct { - DstFilePath string - DstFileName string - SrcFile string - SrcByte []byte -} - -func (e *SCPExector) GetWebSocketOption() WebSocketOption { - return WebSocketOption{ - Path: "upload/", - RawQuery: fmt.Sprintf("file_name=%s&&file_path=%s", e.DstFileName, e.DstFilePath), - } -} - -func (e *SCPExector) SendHandler(conn *websocket.Conn, done <-chan struct{}, interrupt chan struct{}, result *ExectorReturn) { - errHandler := func(err error) { - klog.V(4).Infof("write: %s", err) - result.Reason = err.Error() - close(interrupt) - } - - send := func(data []byte) error { - err := conn.WriteMessage(websocket.BinaryMessage, []byte(data)) - if err != nil { - return err - } - return nil - } - - if len(e.SrcByte) > 0 { - if err := send(e.SrcByte); err != nil { - errHandler(err) - return - } - } else { - file, err := os.Open(e.SrcFile) - if err != nil { - errHandler(err) - return - } - defer file.Close() - - bufferSize := 1024 - buffer := make([]byte, bufferSize) - - reader := bufio.NewReader(file) - for { - select { - case <-interrupt: - return - case <-done: - return - default: - } - n, err := reader.Read(buffer) - if err != nil { - // check if EOF - if err.Error() == "EOF" { - break - } - errHandler(err) - return - } - dataToSend := buffer[:n] - - if err := send(dataToSend); err != nil { - errHandler(err) - return - } - } - } - - if err := send([]byte("EOF")); err != nil { - errHandler(err) - return - } -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go b/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go deleted file mode 100644 index 44c052b89..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go +++ /dev/null @@ -1,386 +0,0 @@ -package vcnodecontroller - -import ( - "context" - "fmt" - "sync" - - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - env "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/env" - "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/workflow" - "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - 
"github.com/kosmos.io/kosmos/pkg/utils" -) - -type NodeController struct { - client.Client - RootClientSet kubernetes.Interface - EventRecorder record.EventRecorder - KosmosClient versioned.Interface - Options *v1alpha1.KubeNestConfiguration - sem chan struct{} -} - -func NewNodeController(client client.Client, RootClientSet kubernetes.Interface, EventRecorder record.EventRecorder, KosmosClient versioned.Interface, options *v1alpha1.KubeNestConfiguration) *NodeController { - r := NodeController{ - Client: client, - RootClientSet: RootClientSet, - EventRecorder: EventRecorder, - KosmosClient: KosmosClient, - Options: options, - sem: make(chan struct{}, env.GetNodeTaskMaxGoroutines()), - } - return &r -} - -func (r *NodeController) SetupWithManager(mgr manager.Manager) error { - if r.Client == nil { - r.Client = mgr.GetClient() - } - - skipEvent := func(_ client.Object) bool { - return true - } - - return ctrl.NewControllerManagedBy(mgr). - Named(constants.NodeControllerName). - WithOptions(controller.Options{}). - For(&v1alpha1.VirtualCluster{}, builder.WithPredicates(predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return skipEvent(createEvent.Object) - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { - return skipEvent(updateEvent.ObjectNew) - }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { - return skipEvent(deleteEvent.Object) - }, - GenericFunc: func(genericEvent event.GenericEvent) bool { - return skipEvent(genericEvent.Object) - }, - })). - Complete(r) -} - -func hasItemInArray(name string, f func(string) bool) bool { - return f(name) -} - -func (r *NodeController) compareAndTranformNodes(ctx context.Context, targetNodes []v1alpha1.NodeInfo, actualNodes []v1.Node) ([]v1alpha1.GlobalNode, []v1alpha1.GlobalNode, error) { - unjoinNodes := make([]v1alpha1.GlobalNode, 0) - joinNodes := make([]v1alpha1.GlobalNode, 0) - - globalNodes := &v1alpha1.GlobalNodeList{} - if err := r.Client.List(ctx, globalNodes); err != nil { - return nil, nil, fmt.Errorf("failed to list global nodes: %v", err) - } - - // cacheMap := map[string]string{} - for _, targetNode := range targetNodes { - has := hasItemInArray(targetNode.NodeName, func(name string) bool { - for _, actualNode := range actualNodes { - if actualNode.Name == name { - return true - } - } - return false - }) - - if !has { - globalNode, ok := util.FindGlobalNode(targetNode.NodeName, globalNodes.Items) - if !ok { - return nil, nil, fmt.Errorf("global node %s not found", targetNode.NodeName) - } - joinNodes = append(joinNodes, *globalNode) - } - } - - for _, actualNode := range actualNodes { - has := hasItemInArray(actualNode.Name, func(name string) bool { - for _, targetNode := range targetNodes { - if targetNode.NodeName == name { - return true - } - } - return false - }) - - if !has { - globalNode, ok := util.FindGlobalNode(actualNode.Name, globalNodes.Items) - if !ok { - return nil, nil, fmt.Errorf("global node %s not found", actualNode.Name) - } - unjoinNodes = append(unjoinNodes, *globalNode) - } - } - - return unjoinNodes, joinNodes, nil -} - -func (r *NodeController) UpdateVirtualClusterStatus(ctx context.Context, virtualCluster v1alpha1.VirtualCluster, status v1alpha1.Phase, reason string) error { - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - var targetObj v1alpha1.VirtualCluster - if err := r.Get(ctx, types.NamespacedName{Name: virtualCluster.Name, Namespace: virtualCluster.Namespace}, &targetObj); err != nil { - klog.Warningf("get target virtualcluster %s 
namespace %s failed: %v", virtualCluster.Name, virtualCluster.Namespace, err) - return err - } - updateVirtualCluster := targetObj.DeepCopy() - if len(status) > 0 { - updateVirtualCluster.Status.Phase = status - } - updateVirtualCluster.Status.Reason = reason - updateTime := metav1.Now() - updateVirtualCluster.Status.UpdateTime = &updateTime - if _, err := r.KosmosClient.KosmosV1alpha1().VirtualClusters(updateVirtualCluster.Namespace).Update(ctx, updateVirtualCluster, metav1.UpdateOptions{}); err != nil && !apierrors.IsNotFound(err) { - klog.Warningf("update target virtualcluster %s namespace %s failed: %v", virtualCluster.Name, virtualCluster.Namespace, err) - return err - } - return nil - }) - - if retryErr != nil { - return fmt.Errorf("update virtualcluster %s status namespace %s failed: %s", virtualCluster.Name, virtualCluster.Namespace, retryErr) - } - - r.EventRecorder.Event(&virtualCluster, v1.EventTypeWarning, "VCStatusPending", fmt.Sprintf("Name: %s, Namespace: %s, reason: %s", virtualCluster.Name, virtualCluster.Namespace, reason)) - - return nil -} - -func (r *NodeController) DoNodeTask(ctx context.Context, virtualCluster v1alpha1.VirtualCluster) error { - k8sClient, err := util.GenerateKubeclient(&virtualCluster) - if err != nil { - return fmt.Errorf("virtualcluster %s crd kubernetes client failed: %v", virtualCluster.Name, err) - } - - nodes, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err != nil { - return fmt.Errorf("virtualcluster %s get virtual-cluster nodes list failed: %v", virtualCluster.Name, err) - } - - // compare cr and actual nodes in k8s - unjoinNodes, joinNodes, err := r.compareAndTranformNodes(ctx, virtualCluster.Spec.PromoteResources.NodeInfos, nodes.Items) - if err != nil { - return fmt.Errorf("compare cr and actual nodes failed, virtual-cluster-name: %v, err: %s", virtualCluster.Name, err) - } - - if len(unjoinNodes) > 0 || len(joinNodes) > 0 { - if virtualCluster.Status.Phase != v1alpha1.Initialized && virtualCluster.Status.Phase != v1alpha1.Deleting { - if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Updating, "node task"); err != nil { - return err - } - } - } - if len(unjoinNodes) > 0 { - // unjoin node - if err := r.unjoinNode(ctx, unjoinNodes, virtualCluster, k8sClient); err != nil { - return fmt.Errorf("virtualcluster %s unjoin node failed: %v", virtualCluster.Name, err) - } - } - if len(joinNodes) > 0 { - // join node - if err := r.joinNode(ctx, joinNodes, virtualCluster, k8sClient); err != nil { - return fmt.Errorf("virtualcluster %s join node failed: %v", virtualCluster.Name, err) - } - } - - if len(unjoinNodes) > 0 || len(joinNodes) > 0 { - newStatus := v1alpha1.AllNodeReady - if virtualCluster.Status.Phase == v1alpha1.Deleting { - newStatus = v1alpha1.AllNodeDeleted - } - if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, newStatus, "node ready"); err != nil { - return err - } - } - return nil -} - -func (r *NodeController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(4).Infof("============ virtual-cluster-node-controller start to reconcile %s ============", request.NamespacedName) - defer klog.V(4).Infof("============ virtual-cluster-node-controller finish to reconcile %s ============", request.NamespacedName) - - // check virtual cluster nodes - var virtualCluster v1alpha1.VirtualCluster - if err := r.Get(ctx, request.NamespacedName, &virtualCluster); err != nil { - if apierrors.IsNotFound(err) { - 
klog.V(4).Infof("virtual-cluster-node-controller: can not found %s", request.NamespacedName) - return reconcile.Result{}, nil - } - klog.Errorf("get clusternode %s error: %v", request.NamespacedName, err) - if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Pending, err.Error()); err != nil { - klog.Errorf("update virtualcluster %s status error: %v", request.NamespacedName, err) - } - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - if !virtualCluster.GetDeletionTimestamp().IsZero() && virtualCluster.Status.Phase != v1alpha1.Deleting { - klog.V(4).Info("virtualcluster %s is deleting, skip node controller", virtualCluster.Name) - return reconcile.Result{}, nil - } - - if !virtualCluster.GetDeletionTimestamp().IsZero() && len(virtualCluster.Spec.Kubeconfig) == 0 { - if err := r.DoNodeClean(ctx, virtualCluster); err != nil { - klog.Errorf("virtualcluster %s do node clean failed: %v", virtualCluster.Name, err) - if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Pending, err.Error()); err != nil { - klog.Errorf("update virtualcluster %s status error: %v", request.NamespacedName, err) - } - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - return reconcile.Result{}, nil - } - - if virtualCluster.Status.Phase == v1alpha1.Preparing { - klog.V(4).Infof("virtualcluster wait cluster ready, cluster name: %s", virtualCluster.Name) - return reconcile.Result{}, nil - } - - if virtualCluster.Status.Phase == v1alpha1.Pending { - klog.V(4).Infof("virtualcluster is pending, cluster name: %s", virtualCluster.Name) - return reconcile.Result{}, nil - } - - if len(virtualCluster.Spec.Kubeconfig) == 0 { - klog.Warning("virtualcluster.spec.kubeconfig is nil, wait virtualcluster control-plane ready.") - return reconcile.Result{}, nil - } - - if err := r.DoNodeTask(ctx, virtualCluster); err != nil { - klog.Errorf("virtualcluster %s do node task failed: %v", virtualCluster.Name, err) - if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Pending, err.Error()); err != nil { - klog.Errorf("update virtualcluster %s status error: %v", request.NamespacedName, err) - } - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } - - return reconcile.Result{}, nil -} - -func (r *NodeController) DoNodeClean(ctx context.Context, virtualCluster v1alpha1.VirtualCluster) error { - targetNodes := virtualCluster.Spec.PromoteResources.NodeInfos - globalNodes := &v1alpha1.GlobalNodeList{} - - if err := r.Client.List(ctx, globalNodes); err != nil { - return fmt.Errorf("failed to list global nodes: %v", err) - } - - cleanNodeInfos := []v1alpha1.GlobalNode{} - - for _, targetNode := range targetNodes { - globalNode, ok := util.FindGlobalNode(targetNode.NodeName, globalNodes.Items) - if !ok { - return fmt.Errorf("global node %s not found", targetNode.NodeName) - } - cleanNodeInfos = append(cleanNodeInfos, *globalNode) - } - - return r.cleanGlobalNode(ctx, cleanNodeInfos, virtualCluster, nil) -} - -func (r *NodeController) cleanGlobalNode(ctx context.Context, nodeInfos []v1alpha1.GlobalNode, virtualCluster v1alpha1.VirtualCluster, _ kubernetes.Interface) error { - return r.BatchProcessNodes(nodeInfos, func(nodeInfo v1alpha1.GlobalNode) error { - return workflow.NewCleanNodeWorkFlow().RunTask(ctx, task.TaskOpt{ - NodeInfo: nodeInfo, - VirtualCluster: virtualCluster, - HostClient: r.Client, - HostK8sClient: r.RootClientSet, - Opt: r.Options, - }) - }) -} - -func (r *NodeController) joinNode(ctx context.Context, nodeInfos 
[]v1alpha1.GlobalNode, virtualCluster v1alpha1.VirtualCluster, k8sClient kubernetes.Interface) error { - if len(nodeInfos) == 0 { - return nil - } - - clusterDNS := "" - dnssvc, err := k8sClient.CoreV1().Services(constants.SystemNs).Get(ctx, constants.KubeDNSSVCName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("get kube-dns service failed: %s", err) - } - clusterDNS = dnssvc.Spec.ClusterIP - - return r.BatchProcessNodes(nodeInfos, func(nodeInfo v1alpha1.GlobalNode) error { - return workflow.NewJoinWorkFlow().RunTask(ctx, task.TaskOpt{ - NodeInfo: nodeInfo, - VirtualCluster: virtualCluster, - KubeDNSAddress: clusterDNS, - HostClient: r.Client, - HostK8sClient: r.RootClientSet, - VirtualK8sClient: k8sClient, - Opt: r.Options, - }) - }) -} - -func (r *NodeController) unjoinNode(ctx context.Context, nodeInfos []v1alpha1.GlobalNode, virtualCluster v1alpha1.VirtualCluster, k8sClient kubernetes.Interface) error { - return r.BatchProcessNodes(nodeInfos, func(nodeInfo v1alpha1.GlobalNode) error { - return workflow.NewUnjoinWorkFlow().RunTask(ctx, task.TaskOpt{ - NodeInfo: nodeInfo, - VirtualCluster: virtualCluster, - HostClient: r.Client, - HostK8sClient: r.RootClientSet, - VirtualK8sClient: k8sClient, - Opt: r.Options, - }) - }) -} - -func (r *NodeController) BatchProcessNodes(nodeInfos []v1alpha1.GlobalNode, f func(v1alpha1.GlobalNode) error) error { - var wg sync.WaitGroup - errChan := make(chan error, len(nodeInfos)) - - for _, nodeInfo := range nodeInfos { - wg.Add(1) - r.sem <- struct{}{} - go func(nodeInfo v1alpha1.GlobalNode) { - defer wg.Done() - defer func() { <-r.sem }() - if err := f(nodeInfo); err != nil { - errChan <- fmt.Errorf("[%s] batchprocessnodes failed: %s", nodeInfo.Name, err) - } - }(nodeInfo) - } - - wg.Wait() - close(errChan) - - var taskErr error - for err := range errChan { - if err != nil { - if taskErr == nil { - taskErr = err - } else { - taskErr = errors.Wrap(err, taskErr.Error()) - } - } - } - - if taskErr != nil { - return taskErr - } - - return nil -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/logger.go b/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/logger.go deleted file mode 100644 index d48113de6..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/logger.go +++ /dev/null @@ -1,47 +0,0 @@ -package task - -import ( - "fmt" - - "k8s.io/klog/v2" -) - -type PrefixedLogger struct { - level klog.Verbose - prefix string -} - -func NewPrefixedLogger(level klog.Verbose, prefix string) *PrefixedLogger { - return &PrefixedLogger{level: level, prefix: prefix} -} - -func (p *PrefixedLogger) Info(args ...interface{}) { - if p.level.Enabled() { - klog.InfoDepth(1, append([]interface{}{p.prefix}, args...)...) - } -} - -func (p *PrefixedLogger) Infof(format string, args ...interface{}) { - if p.level.Enabled() { - klog.InfoDepth(1, fmt.Sprintf(p.prefix+format, args...)) - } -} - -func (p *PrefixedLogger) Warn(args ...interface{}) { - if p.level.Enabled() { - klog.WarningDepth(1, append([]interface{}{p.prefix}, args...)...) - } -} -func (p *PrefixedLogger) Warnf(format string, args ...interface{}) { - if p.level.Enabled() { - klog.WarningDepth(1, fmt.Sprintf(p.prefix+format, args...)) - } -} - -func (p *PrefixedLogger) Error(args ...interface{}) { - klog.ErrorDepth(1, append([]interface{}{p.prefix}, args...)...) 
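BatchProcessNodes above fans each node workflow out to its own goroutine while a buffered channel, sized from GetNodeTaskMaxGoroutines, acts as a counting semaphore. The same pattern in isolation, a sketch with string items standing in for GlobalNodes:

package vcnodecontroller

import (
	"fmt"
	"sync"
)

// batchProcess runs f over items with at most maxGoroutines in flight.
// Errors are collected on a channel buffered to len(items) so no worker
// ever blocks on reporting.
func batchProcess(items []string, maxGoroutines int, f func(string) error) error {
	sem := make(chan struct{}, maxGoroutines)
	errChan := make(chan error, len(items))
	var wg sync.WaitGroup

	for _, item := range items {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot before spawning, as above
		go func(item string) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			if err := f(item); err != nil {
				errChan <- fmt.Errorf("[%s] failed: %w", item, err)
			}
		}(item)
	}

	wg.Wait()
	close(errChan)

	// Fold all collected errors into one, mirroring the wrap chain above.
	var combined error
	for err := range errChan {
		if combined == nil {
			combined = err
		} else {
			combined = fmt.Errorf("%v; %w", combined, err)
		}
	}
	return combined
}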
-} - -func (p *PrefixedLogger) Errorf(format string, args ...interface{}) { - klog.ErrorDepth(1, fmt.Sprintf(p.prefix+format, args...)) -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/task.go b/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/task.go deleted file mode 100644 index ff549b0e1..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/task.go +++ /dev/null @@ -1,582 +0,0 @@ -package task - -import ( - "context" - "encoding/base64" - "fmt" - "strings" - "time" - - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - env "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/env" - "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/exector" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -// nolint:revive -type TaskOpt struct { - NodeInfo v1alpha1.GlobalNode - VirtualCluster v1alpha1.VirtualCluster - KubeDNSAddress string - - HostClient client.Client - HostK8sClient kubernetes.Interface - VirtualK8sClient kubernetes.Interface - - Opt *v1alpha1.KubeNestConfiguration - logger *PrefixedLogger -} - -func (to *TaskOpt) Loger() *PrefixedLogger { - if to.logger == nil { - to.logger = NewPrefixedLogger(klog.V(4), fmt.Sprintf("[%s] ", to.NodeInfo.Name)) - } - return to.logger -} - -type Task struct { - Name string - Run func(context.Context, TaskOpt, interface{}) (interface{}, error) - Retry bool - SubTasks []Task - Skip func(context.Context, TaskOpt) bool - ErrorIgnore bool -} - -func NewCheckEnvTask() Task { - return Task{ - Name: "remote environment check", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - // check - checkCmd := &exector.CMDExector{ - Cmd: fmt.Sprintf("bash %s check", env.GetExectorShellName()), - } - ret := exectHelper.DoExector(ctx.Done(), checkCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("check node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - return nil, nil - }, - } -} - -func NewKubeadmResetTask() Task { - return Task{ - Name: "remote kubeadm reset", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - - resetCmd := &exector.CMDExector{ - Cmd: fmt.Sprintf("bash %s unjoin", env.GetExectorShellName()), - } - - ret := exectHelper.DoExector(ctx.Done(), resetCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("reset node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - return nil, nil - }, - } -} - -// nolint:dupl -func NewDrainHostNodeTask() Task { - return Task{ - Name: "drain host node", - Retry: true, - Skip: func(ctx context.Context, opt TaskOpt) bool { - if opt.Opt != nil { - return opt.Opt.KubeInKubeConfig.ForceDestroy - } - return false - }, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - targetNode, err := to.HostK8sClient.CoreV1().Nodes().Get(ctx, to.NodeInfo.Name, metav1.GetOptions{}) - if err != nil { - if 
apierrors.IsNotFound(err) { - return nil, nil - } - return nil, fmt.Errorf("get node %s failed: %s", to.NodeInfo.Name, err) - } - - if err := util.DrainNode(ctx, targetNode.Name, to.HostK8sClient, targetNode, env.GetDrainWaitSeconds(), true); err != nil { - klog.Warningf("drain node %s failed: %s, will force delete node", to.NodeInfo.Name, err) - return nil, err - } - return nil, nil - }, - } -} - -// nolint:dupl -func NewDrainVirtualNodeTask() Task { - return Task{ - Name: "drain virtual-control-plane node", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - targetNode, err := to.VirtualK8sClient.CoreV1().Nodes().Get(ctx, to.NodeInfo.Name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, nil - } - return nil, fmt.Errorf("get node %s failed: %s", to.NodeInfo.Name, err) - } - - if err := util.DrainNode(ctx, targetNode.Name, to.VirtualK8sClient, targetNode, env.GetDrainWaitSeconds(), false); err != nil { - klog.Warningf("drain node %s failed: %s, will force delete node", to.NodeInfo.Name, err) - return nil, nil - } - return nil, nil - }, - } -} - -func NewCleanHostClusterNodeTask() Task { - return Task{ - Name: "clean host cluster node", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - targetNode := &v1.Node{} - if err := to.HostClient.Get(ctx, types.NamespacedName{ - Name: to.NodeInfo.Name, - }, targetNode); err != nil { - if apierrors.IsNotFound(err) { - return nil, nil - } - return nil, fmt.Errorf("get target node %s failed: %s", to.NodeInfo.Name, err) - } - - if err := to.HostClient.Delete(ctx, targetNode); err != nil { - return nil, err - } - - return nil, nil - }, - } -} - -func NewReomteUploadCATask() Task { - return Task{ - Name: "remote upload ca.crt", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - - nn := types.NamespacedName{ - Namespace: to.VirtualCluster.Namespace, - Name: fmt.Sprintf("%s-cert", to.VirtualCluster.Name), - } - targetCert := &v1.Secret{} - if err := to.HostClient.Get(ctx, nn, targetCert); err != nil { - return nil, fmt.Errorf("get target cert %s failed: %s", nn, err) - } - - cacrt := targetCert.Data["ca.crt"] - scpCrtCmd := &exector.SCPExector{ - DstFilePath: env.GetExectorTmpPath(), - DstFileName: "ca.crt", - SrcByte: cacrt, - } - ret := exectHelper.DoExector(ctx.Done(), scpCrtCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("scp ca.crt to node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - return nil, nil - }, - } -} - -func NewRemoteUpdateKubeletConfTask() Task { - return Task{ - Name: "remote upload kubelet.conf", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - - kubeconfig, err := base64.StdEncoding.DecodeString(to.VirtualCluster.Spec.Kubeconfig) - if err != nil { - return nil, fmt.Errorf("decode target kubeconfig %s failed: %s", to.VirtualCluster.Name, err) - } - - scpKCCmd := &exector.SCPExector{ - DstFilePath: env.GetExectorTmpPath(), - DstFileName: env.GetKubeletKubeConfigName(), - SrcByte: kubeconfig, - } - ret := exectHelper.DoExector(ctx.Done(), scpKCCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("scp kubeconfig to node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - return nil, nil - }, - } -} - -func 
NewRemoteUpdateConfigYamlTask() Task { - return Task{ - Name: "remote upload config.yaml", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - - scpKubeletConfigCmd := &exector.SCPExector{ - DstFilePath: env.GetExectorTmpPath(), - DstFileName: env.GetKubeletConfigName(), - SrcFile: env.GetExectorWorkerDir() + env.GetKubeletConfigName(), - } - - ret := exectHelper.DoExector(ctx.Done(), scpKubeletConfigCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("scp kubelet config to node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - return nil, nil - }, - } -} - -func NewRemoteNodeJoinTask() Task { - return Task{ - Name: "remote join node to virtual control plane", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - baseCmd := fmt.Sprintf("bash %s join %s", env.GetExectorShellName(), to.KubeDNSAddress) - if to.VirtualCluster.Spec.KubeInKubeConfig != nil && to.VirtualCluster.Spec.KubeInKubeConfig.UseNodeLocalDNS { - baseCmd = fmt.Sprintf("bash %s join %s %s", env.GetExectorShellName(), to.KubeDNSAddress, constants.NodeLocalDNSIp) - } - joinCmd := &exector.CMDExector{ - Cmd: baseCmd, - } - to.Loger().Infof("join node %s with cmd: %s", to.NodeInfo.Name, joinCmd.Cmd) - ret := exectHelper.DoExector(ctx.Done(), joinCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("join node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - return nil, nil - }, - } -} - -func NewWaitNodeReadyTask(isHost bool) Task { - return Task{ - Name: "wait new node ready", - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - isReady := false - - waitFunc := func(timeout time.Duration) { - waitCtx, cancel := context.WithTimeout(ctx, timeout) // total waiting time - defer cancel() - wait.UntilWithContext(waitCtx, func(ctx context.Context) { - client := to.VirtualK8sClient - if isHost { - client = to.HostK8sClient - } - - node, err := client.CoreV1().Nodes().Get(waitCtx, to.NodeInfo.Name, metav1.GetOptions{}) - if err == nil { - if util.IsNodeReady(node.Status.Conditions) { - to.Loger().Infof("node %s is ready", to.NodeInfo.Name) - isReady = true - cancel() - } else { - to.Loger().Infof("node %s is not ready, status: %s", to.NodeInfo.Name, node.Status.Phase) - } - } else { - to.Loger().Infof("get node %s failed: %s", to.NodeInfo.Name, err) - } - }, 10*time.Second) // Interval time - } - - waitFunc(time.Duration(env.GetWaitNodeReadTime()) * time.Second) - - if isReady { - return nil, nil - } - - // try to restart containerd and kubelet - to.Loger().Infof("try to restart containerd and kubelet on node: %s", to.NodeInfo.Name) - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - - restartContainerdCmd := &exector.CMDExector{ - Cmd: "systemctl restart containerd", - } - ret := exectHelper.DoExector(ctx.Done(), restartContainerdCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("cannot restart containerd: %s", ret.String()) - } - - restartKubeletCmd := &exector.CMDExector{ - Cmd: "systemctl restart kubelet", - } - ret = exectHelper.DoExector(ctx.Done(), restartKubeletCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("cannot restart kubelet: %s", ret.String()) - } - - to.Loger().Infof("wait for the node to be ready again. 
%s", to.NodeInfo.Name) - waitFunc(time.Duration(env.GetWaitNodeReadTime()*2) * time.Second) - - if isReady { - return nil, nil - } - - return nil, fmt.Errorf("node %s is not ready", to.NodeInfo.Name) - }, - } -} - -func NewInstallLBTask() Task { - return Task{ - Name: "remote install load balancer", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - - joinCmd := &exector.CMDExector{ - Cmd: fmt.Sprintf("bash %s install_lb", env.GetExectorShellName()), - } - to.Loger().Infof("install nginx %s with cmd: %s", to.NodeInfo.Name, joinCmd.Cmd) - ret := exectHelper.DoExector(ctx.Done(), joinCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("nstall nginx %s failed: %s", to.NodeInfo.Name, ret.String()) - } - return nil, nil - }, - } -} - -// nolint:dupl -func NewUpdateVirtualNodeLabelsTask() Task { - return Task{ - Name: "update new-node labels", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - node, err := to.VirtualK8sClient.CoreV1().Nodes().Get(ctx, to.NodeInfo.Name, metav1.GetOptions{}) - if err != nil { - to.Loger().Infof("get node %s failed: %s", to.NodeInfo.Name, err) - return err - } - - updateNode := node.DeepCopy() - for k, v := range to.NodeInfo.Spec.Labels { - updateNode.Labels[k] = v - } - - // add free label - updateNode.Labels[constants.StateLabelKey] = string(v1alpha1.NodeInUse) - - if _, err := to.VirtualK8sClient.CoreV1().Nodes().Update(ctx, updateNode, metav1.UpdateOptions{}); err != nil { - to.Loger().Infof("add label to node %s failed: %s", to.NodeInfo.Name, err) - return err - } - return nil - }) - - return nil, err - }, - } -} - -// nolint:dupl -func NewUpdateHostNodeLabelsTask() Task { - return Task{ - Name: "update host-node labels", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - node, err := to.HostK8sClient.CoreV1().Nodes().Get(ctx, to.NodeInfo.Name, metav1.GetOptions{}) - if err != nil { - to.Loger().Infof("get node %s failed: %s", to.NodeInfo.Name, err) - return err - } - - updateNode := node.DeepCopy() - for k, v := range to.NodeInfo.Spec.Labels { - updateNode.Labels[k] = v - } - - // add free label - updateNode.Labels[constants.StateLabelKey] = string(v1alpha1.NodeFreeState) - - if _, err := to.HostK8sClient.CoreV1().Nodes().Update(ctx, updateNode, metav1.UpdateOptions{}); err != nil { - to.Loger().Infof("add label to node %s failed: %s", to.NodeInfo.Name, err) - return err - } - return nil - }) - - return nil, err - }, - } -} - -func NewUpdateNodePoolItemStatusTask(nodeState v1alpha1.NodeState, isClean bool) Task { - return Task{ - Name: "Update node status in NodePool ", - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - targetGlobalNode := v1alpha1.GlobalNode{} - - if err := to.HostClient.Get(ctx, types.NamespacedName{Name: to.NodeInfo.Name}, &targetGlobalNode); err != nil { - to.Loger().Errorf("get global node %s failed: %s", to.NodeInfo.Name, err) - return err - } - - updateGlobalNode := targetGlobalNode.DeepCopy() - - updateGlobalNode.Spec.State = nodeState - if err := to.HostClient.Update(ctx, updateGlobalNode); err != nil { - to.Loger().Errorf("update global node %s spec.state failed: %s", 
updateGlobalNode.Name, err) - return err - } - if isClean { - updateGlobalNode.Status.VirtualCluster = "" - if err := to.HostClient.Status().Update(ctx, updateGlobalNode); err != nil { - to.Loger().Errorf("update global node %s status failed: %s", updateGlobalNode.Name, err) - return err - } - } - return nil - }) - - return nil, err - }, - } -} - -func NewRemoveNodeFromVirtualTask() Task { - return Task{ - Name: "remove node from virtual control-plane", - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - err := to.VirtualK8sClient.CoreV1().Nodes().Delete(ctx, to.NodeInfo.Name, metav1.DeleteOptions{}) - if err != nil { - return nil, fmt.Errorf("remove node from cluster failed, node name:%s, error: %s", to.NodeInfo.Name, err) - } - return nil, nil - }, - } -} - -func NewExecShellUnjoinCmdTask() Task { - return Task{ - Name: "exec shell unjoin cmd", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - - resetCmd := &exector.CMDExector{ - Cmd: fmt.Sprintf("bash %s unjoin", env.GetExectorShellName()), - } - - ret := exectHelper.DoExector(ctx.Done(), resetCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("reset node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - - return nil, nil - }, - } -} - -func getJoinCmdStr(log string) (string, error) { - strs := strings.Split(log, "kubeadm join") - if len(strs) != 2 { - return "", fmt.Errorf("get join cmd str failed") - } - return fmt.Sprintf("kubeadm join %s", strings.TrimSpace(strs[1])), nil -} - -func getJoinCmdArgs(joinCmdStr string) (string, string, string, error) { - strs := strings.Split(joinCmdStr, " ") - if len(strs) != 7 { - return "", "", "", fmt.Errorf("invalid join cmd str: %s", joinCmdStr) - } - return strings.TrimSpace(strs[2]), strings.TrimSpace(strs[4]), strings.TrimSpace(strs[6]), nil -} - -func NewJoinNodeToHostCmd() Task { - return Task{ - Name: "join node to host", - SubTasks: []Task{ - NewGetJoinNodeToHostCmdTask(), - NewExecJoinNodeToHostCmdTask(), - NewWaitNodeReadyTask(true), - }, - } -} - -func NewGetJoinNodeToHostCmdTask() Task { - return Task{ - Name: "remote get host node join cmd str", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) { - masterNodeIP := env.GetExectorHostMasterNodeIP() - hostExectorHelper := exector.NewExectorHelper(masterNodeIP, "") - joinCmdStrCmd := &exector.CMDExector{ - Cmd: "kubeadm token create --print-join-command", - } - ret := hostExectorHelper.DoExector(ctx.Done(), joinCmdStrCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("get host join cmd on node %s failed: %s", to.NodeInfo.Name, ret.String()) - } - - joinCmdStr, err := getJoinCmdStr(ret.LastLog) - if err != nil { - return nil, err - } - return joinCmdStr, nil - }, - } -} - -func NewExecJoinNodeToHostCmdTask() Task { - return Task{ - Name: "remote join node to host", - Retry: true, - Run: func(ctx context.Context, to TaskOpt, args interface{}) (interface{}, error) { - // check - _, err := to.HostK8sClient.CoreV1().Nodes().Get(ctx, to.NodeInfo.Name, metav1.GetOptions{}) - if err == nil { - to.Loger().Info("node already joined, skip task") - return nil, nil - } - if !apierrors.IsNotFound(err) { - return nil, fmt.Errorf("query node %s failed, the error is %s", to.NodeInfo.Name, err.Error()) - } - - joinCmdStr, ok := args.(string) - if !ok { - return nil, fmt.Errorf("get join cmd str failed") - } - host, token, 
certHash, err := getJoinCmdArgs(joinCmdStr) - if err != nil { - return nil, err - } - joinCmd := &exector.CMDExector{ - Cmd: fmt.Sprintf("bash %s revert %s %s %s", env.GetExectorShellName(), host, token, certHash), - } - - exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "") - ret := exectHelper.DoExector(ctx.Done(), joinCmd) - if ret.Status != exector.SUCCESS { - return nil, fmt.Errorf("exec join cmd on node %s failed: %s, join cmd: %s", to.NodeInfo.Name, ret.String(), joinCmdStr) - } - return nil, nil - }, - } -} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/workflow.go b/pkg/kubenest/controller/virtualcluster.node.controller/workflow/workflow.go deleted file mode 100644 index e8eba31b5..000000000 --- a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/workflow.go +++ /dev/null @@ -1,125 +0,0 @@ -package workflow - -import ( - "context" - "time" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task" -) - -const ( - retryCount = 0 - maxRetries = 5 -) - -// nolint:revive -type WorkflowData struct { - Tasks []task.Task -} - -func RunWithRetry(ctx context.Context, task task.Task, opt task.TaskOpt, preArgs interface{}) (interface{}, error) { - i := retryCount - var err error - var args interface{} - for ; i < maxRetries; i++ { - if args, err = task.Run(ctx, opt, preArgs); err != nil { - if !task.Retry { - break - } - waitTime := 3 * (i + 1) - opt.Loger().Warnf("work flow retry %d after %ds, task name: %s, err: %s", i, waitTime, task.Name, err) - time.Sleep(time.Duration(waitTime) * time.Second) - } else { - break - } - } - if err != nil { - if task.ErrorIgnore { - opt.Loger().Warnf("work flow ignore err, task name: %s, err: %s", task.Name, err) - return nil, nil - } - opt.Loger().Warnf("work flow interrupt, task name: %s, err: %s", task.Name, err) - return nil, err - } - return args, nil -} - -// nolint:revive -func (w WorkflowData) RunTask(ctx context.Context, opt task.TaskOpt) error { - var args interface{} - for i, t := range w.Tasks { - opt.Loger().Infof("HHHHHHHHHHHH (%d/%d) work flow run task %s HHHHHHHHHHHH", i+1, len(w.Tasks), t.Name) - if t.Skip != nil && t.Skip(ctx, opt) { - opt.Loger().Infof("work flow skip task %s", t.Name) - continue - } - if len(t.SubTasks) > 0 { - for j, subTask := range t.SubTasks { - opt.Loger().Infof("HHHHHHHHHHHH (%d/%d) work flow run sub task %s HHHHHHHHHHHH", j+1, len(t.SubTasks), subTask.Name) - if t.Skip != nil && t.Skip(ctx, opt) { - opt.Loger().Infof("work flow skip sub task %s", subTask.Name) - continue - } - - if nextArgs, err := RunWithRetry(ctx, subTask, opt, args); err != nil { - return err - } else { - args = nextArgs - } - } - } else { - if nextArgs, err := RunWithRetry(ctx, t, opt, args); err != nil { - return err - } else { - args = nextArgs - } - } - } - return nil -} - -func NewJoinWorkFlow() WorkflowData { - joinTasks := []task.Task{ - task.NewCheckEnvTask(), - task.NewDrainHostNodeTask(), - task.NewKubeadmResetTask(), - task.NewCleanHostClusterNodeTask(), - task.NewReomteUploadCATask(), - task.NewRemoteUpdateKubeletConfTask(), - task.NewRemoteUpdateConfigYamlTask(), - task.NewRemoteNodeJoinTask(), - task.NewWaitNodeReadyTask(false), - task.NewInstallLBTask(), - task.NewUpdateVirtualNodeLabelsTask(), - task.NewUpdateNodePoolItemStatusTask(v1alpha1.NodeInUse, false), - } - - return WorkflowData{ - Tasks: joinTasks, - } -} - -func NewUnjoinWorkFlow() WorkflowData { - unjoinTasks := 
[]task.Task{ - task.NewCheckEnvTask(), - task.NewDrainVirtualNodeTask(), - task.NewRemoveNodeFromVirtualTask(), - task.NewExecShellUnjoinCmdTask(), - task.NewJoinNodeToHostCmd(), - task.NewUpdateHostNodeLabelsTask(), - task.NewUpdateNodePoolItemStatusTask(v1alpha1.NodeFreeState, true), - } - return WorkflowData{ - Tasks: unjoinTasks, - } -} - -func NewCleanNodeWorkFlow() WorkflowData { - cleanNodeTasks := []task.Task{ - task.NewUpdateNodePoolItemStatusTask(v1alpha1.NodeFreeState, true), - } - return WorkflowData{ - Tasks: cleanNodeTasks, - } -} diff --git a/pkg/kubenest/controller/virtualcluster_execute_controller.go b/pkg/kubenest/controller/virtualcluster_execute_controller.go deleted file mode 100644 index 11c7478ac..000000000 --- a/pkg/kubenest/controller/virtualcluster_execute_controller.go +++ /dev/null @@ -1,85 +0,0 @@ -package controller - -import ( - "fmt" - - "github.com/pkg/errors" - "k8s.io/client-go/rest" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -type Executor struct { - client.Client - virtualCluster *v1alpha1.VirtualCluster - phase *workflow.Phase - config *rest.Config -} - -func NewExecutor(virtualCluster *v1alpha1.VirtualCluster, c client.Client, config *rest.Config, kubeNestOptions *v1alpha1.KubeNestConfiguration) (*Executor, error) { - var phase *workflow.Phase - - opts := []kubenest.InitOpt{ - kubenest.NewInitOptWithVirtualCluster(virtualCluster), - kubenest.NewInitOptWithKubeconfig(config), - kubenest.NewInitOptWithKubeNestOptions(kubeNestOptions), - } - options := kubenest.NewPhaseInitOptions(opts...) 
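RunWithRetry above gives each retryable task up to maxRetries attempts with a linearly growing sleep (3s, 6s, 9s, ...) between failures, while tasks with Retry set to false fail on the first error. The control flow, reduced to its core as a sketch:

package workflow

import (
	"fmt"
	"time"
)

// runWithRetry is a condensed form of the loop above: retry up to
// maxRetries times with linear backoff, or bail out immediately when
// the task is not retryable.
func runWithRetry(maxRetries int, retryable bool, run func() error) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = run(); err == nil {
			return nil
		}
		if !retryable {
			break
		}
		time.Sleep(time.Duration(3*(i+1)) * time.Second) // 3s, 6s, 9s, ...
	}
	return fmt.Errorf("task failed: %w", err)
}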
- action := recognizeActionFor(virtualCluster) - switch action { - case constants.InitAction: - phase = kubenest.NewInitPhase(options) - case constants.DeInitAction: - phase = kubenest.UninstallPhase(options) - default: - return nil, fmt.Errorf("failed to recognize action for virtual cluster %s", virtualCluster.Name) - } - - return &Executor{ - virtualCluster: virtualCluster, - Client: c, - phase: phase, - config: config, - }, nil -} - -func (e *Executor) Execute() error { - klog.InfoS("Start execute the workflow", "workflow", "virtual cluster", klog.KObj(e.virtualCluster)) - - if err := e.phase.Run(); err != nil { - klog.ErrorS(err, "failed to executed the workflow", "workflow", "virtual cluster", klog.KObj(e.virtualCluster)) - return errors.Wrap(err, "failed to executed the workflow") - } - klog.InfoS("Successfully executed the workflow", "workflow", "virtual cluster", klog.KObj(e.virtualCluster)) - return nil -} - -//func (e *Executor) afterRunPhase() error { -// localClusterClient, err := clientset.NewForConfig(e.config) -// if err != nil { -// return fmt.Errorf("error when creating local cluster client, err: %w", err) -// } -// secret, err := localClusterClient.CoreV1().Secrets(e.virtualCluster.GetNamespace()).Get(context.TODO(), -// fmt.Sprintf("%s-%s", e.virtualCluster.GetName(), constants.AdminConfig), metav1.GetOptions{}) -// if err != nil { -// return err -// } -// -// kubeconfigBytes := secret.Data[constants.restConfig] -// configString := base64.StdEncoding.EncodeToString(kubeconfigBytes) -// e.virtualCluster.Spec.Kubeconfig = configString -// e.virtualCluster.Status.Phase = v1alpha1.Completed -// return e.Client.Update(context.TODO(), e.virtualCluster) -//} - -func recognizeActionFor(virtualCluster *v1alpha1.VirtualCluster) constants.Action { - if !virtualCluster.DeletionTimestamp.IsZero() { - return constants.DeInitAction - } - return constants.InitAction -} diff --git a/pkg/kubenest/controller/virtualcluster_init_controller.go b/pkg/kubenest/controller/virtualcluster_init_controller.go deleted file mode 100644 index 7c3545f38..000000000 --- a/pkg/kubenest/controller/virtualcluster_init_controller.go +++ /dev/null @@ -1,1088 +0,0 @@ -package controller - -import ( - "context" - "encoding/base64" - "fmt" - "sort" - "sync" - "time" - - "github.com/pkg/errors" - "gopkg.in/yaml.v3" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/klog/v2" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - env "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/env" - 
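Both the commented-out afterRunPhase above and createVirtualCluster further down read the virtual cluster's admin kubeconfig secret and store its contents base64-encoded on the VirtualCluster spec. A sketch of that hand-off; the "%s-admin-config" secret name and "kubeconfig" data key are stand-ins for the constants used in this file, not the verified values:

package controller

import (
	"context"
	"encoding/base64"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// fetchAdminKubeconfig reads the admin kubeconfig secret for a virtual
// cluster and returns it base64-encoded, ready to be stored on
// spec.kubeconfig. Secret naming here is an assumption.
func fetchAdminKubeconfig(ctx context.Context, client kubernetes.Interface, ns, vcName string) (string, error) {
	secretName := fmt.Sprintf("%s-admin-config", vcName)
	secret, err := client.CoreV1().Secrets(ns).Get(ctx, secretName, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("get secret %s/%s: %w", ns, secretName, err)
	}
	return base64.StdEncoding.EncodeToString(secret.Data["kubeconfig"]), nil
}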
"github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/exector" - "github.com/kosmos.io/kosmos/pkg/kubenest/tasks" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -type VirtualClusterInitController struct { - client.Client - Config *rest.Config - EventRecorder record.EventRecorder - RootClientSet kubernetes.Interface - KosmosClient versioned.Interface - lock sync.Mutex - KubeNestOptions *v1alpha1.KubeNestConfiguration -} - -type NodePool struct { - Address string `json:"address" yaml:"address"` - Labels map[string]string `json:"labels" yaml:"labels"` - Cluster string `json:"cluster" yaml:"cluster"` - State string `json:"state" yaml:"state"` -} - -type HostPortPool struct { - PortsPool []int32 `yaml:"portsPool"` -} - -type VipPool struct { - Vips []string `yaml:"vipPool"` -} - -const ( - VirtualClusterControllerFinalizer = "kosmos.io/virtualcluster-controller" - RequeueTime = 10 * time.Second -) - -var nameMap = map[string]int{ - "agentport": 1, - "serverport": 2, - "healthport": 3, - "adminport": 4, -} - -func (c *VirtualClusterInitController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - startTime := time.Now() - klog.V(4).InfoS("Started syncing virtual cluster", "virtual cluster", request, "startTime", startTime) - defer func() { - klog.V(4).InfoS("Finished syncing virtual cluster", "virtual cluster", request, "duration", time.Since(startTime)) - }() - - originalCluster := &v1alpha1.VirtualCluster{} - if err := c.Get(ctx, request.NamespacedName, originalCluster); err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).InfoS("Virtual Cluster has been deleted", "Virtual Cluster", request) - return reconcile.Result{}, nil - } - return reconcile.Result{RequeueAfter: RequeueTime}, nil - } - updatedCluster := originalCluster.DeepCopy() - updatedCluster.Status.Reason = "" - - //The object is being deleted - if !originalCluster.DeletionTimestamp.IsZero() { - if len(updatedCluster.Spec.PromoteResources.NodeInfos) > 0 { - updatedCluster.Spec.PromoteResources.NodeInfos = nil - updatedCluster.Status.Phase = v1alpha1.Deleting - err := c.Update(updatedCluster) - if err != nil { - klog.Errorf("Error update virtualcluster %s status to %s", updatedCluster.Name, updatedCluster.Status.Phase) - return reconcile.Result{}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) - } - return reconcile.Result{}, nil - } - - if updatedCluster.Status.Phase == v1alpha1.AllNodeDeleted { - err := c.destroyVirtualCluster(updatedCluster) - if err != nil { - klog.Errorf("Destroy virtual cluter %s failed. err: %s", updatedCluster.Name, err.Error()) - return reconcile.Result{}, errors.Wrapf(err, "Destroy virtual cluter %s failed. 
err: %s", updatedCluster.Name, err.Error()) - } - return c.removeFinalizer(updatedCluster) - } else if updatedCluster.Status.Phase == v1alpha1.Deleting { - klog.V(2).InfoS("Virtual Cluster is deleting, wait for event 'AllNodeDeleted'", "Virtual Cluster", request) - return reconcile.Result{}, nil - } - return c.removeFinalizer(updatedCluster) - } - - switch originalCluster.Status.Phase { - case "": - //create request - updatedCluster.Status.Phase = v1alpha1.Preparing - err := c.Update(updatedCluster) - if err != nil { - klog.Errorf("Error update virtualcluster %s status, err: %v", updatedCluster.Name, err) - return reconcile.Result{RequeueAfter: RequeueTime}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) - } - - err = c.createVirtualCluster(updatedCluster, c.KubeNestOptions) - if err != nil { - klog.Errorf("Failed to create virtualcluster %s. err: %s", updatedCluster.Name, err.Error()) - updatedCluster.Status.Reason = err.Error() - updatedCluster.Status.Phase = v1alpha1.Pending - err := c.Update(updatedCluster) - if err != nil { - klog.Errorf("Error update virtualcluster %s. err: %s", updatedCluster.Name, err.Error()) - return reconcile.Result{}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) - } - return reconcile.Result{}, errors.Wrap(err, "Error createVirtualCluster") - } - updatedCluster.Status.Phase = v1alpha1.Initialized - err = c.Update(updatedCluster) - if err != nil { - klog.Errorf("Error update virtualcluster %s status to %s. %v", updatedCluster.Name, updatedCluster.Status.Phase, err) - return reconcile.Result{}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) - } - case v1alpha1.AllNodeReady: - name, namespace := request.Name, request.Namespace - // check if the vc enable vip - if len(originalCluster.Status.VipMap) > 0 { - // label node for keepalived - vcClient, err := tasks.GetVcClientset(c.RootClientSet, name, namespace) - if err != nil { - klog.Errorf("Get vc client failed. err: %s", err.Error()) - return reconcile.Result{}, errors.Wrapf(err, "Get vc client failed. err: %s", err.Error()) - } - reps, err := c.labelNode(vcClient) - if err != nil { - klog.Errorf("Label node for keepalived failed. err: %s", err.Error()) - return reconcile.Result{}, errors.Wrapf(err, "Label node for keepalived failed. err: %s", err.Error()) - } - klog.V(2).Infof("Label %d node for keepalived", reps) - } - - err := c.ensureAllPodsRunning(updatedCluster, constants.WaitAllPodsRunningTimeoutSeconds*time.Second) - if err != nil { - klog.Errorf("Check all pods running err: %s", err.Error()) - updatedCluster.Status.Reason = err.Error() - updatedCluster.Status.Phase = v1alpha1.Pending - } else { - updatedCluster.Status.Phase = v1alpha1.Completed - } - err = c.Update(updatedCluster) - if err != nil { - klog.Errorf("Error update virtualcluster %s status to %s", updatedCluster.Name, updatedCluster.Status.Phase) - return reconcile.Result{}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) - } - case v1alpha1.Completed: - //update request, check if promotepolicy nodes increase or decrease. - // only 2 scenarios matched update request with status 'completed'. - // 1. node scale request, original status is 'completed'. 2. node scale process finished by NodeController, the controller changes status from 'updating' to 'completed' - policyChanged, err := c.checkPromotePoliciesChanged(updatedCluster) - if err != nil { - klog.Errorf("Error check promote policies changed. 
err: %s", err.Error()) - return reconcile.Result{RequeueAfter: RequeueTime}, errors.Wrapf(err, "Error checkPromotePoliciesChanged virtualcluster %s", updatedCluster.Name) - } - if !policyChanged { - return reconcile.Result{}, nil - } - err = c.assignWorkNodes(updatedCluster) - if err != nil { - return reconcile.Result{RequeueAfter: RequeueTime}, errors.Wrapf(err, "Error update virtualcluster %s", updatedCluster.Name) - } - updatedCluster.Status.Phase = v1alpha1.Updating - err = c.Update(updatedCluster) - if err != nil { - klog.Errorf("Error update virtualcluster %s status to %s", updatedCluster.Name, updatedCluster.Status.Phase) - return reconcile.Result{}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) - } - - default: - klog.Warningf("Skip virtualcluster %s reconcile status: %s", originalCluster.Name, originalCluster.Status.Phase) - } - return c.ensureFinalizer(updatedCluster) -} - -func (c *VirtualClusterInitController) SetupWithManager(mgr manager.Manager) error { - return controllerruntime.NewControllerManagedBy(mgr). - Named(constants.InitControllerName). - WithOptions(controller.Options{MaxConcurrentReconciles: 5}). - For(&v1alpha1.VirtualCluster{}, - builder.WithPredicates(predicate.Funcs{ - // UpdateFunc: c.onVirtualClusterUpdate, - CreateFunc: func(createEvent event.CreateEvent) bool { - return true - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { return true }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { return true }, - })). - Complete(c) -} - -func (c *VirtualClusterInitController) Update(updated *v1alpha1.VirtualCluster) error { - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - current := &v1alpha1.VirtualCluster{} - if err := c.Client.Get(context.TODO(), types.NamespacedName{ - Namespace: updated.Namespace, - Name: updated.Name, - }, current); err != nil { - klog.Errorf("get virtualcluster %s error. %v", updated.Name, err) - return err - } - now := metav1.Now() - updated.Status.UpdateTime = &now - updated.ResourceVersion = current.ResourceVersion - return c.Client.Patch(context.TODO(), updated, client.MergeFrom(current)) - }) -} - -func (c *VirtualClusterInitController) ensureFinalizer(virtualCluster *v1alpha1.VirtualCluster) (reconcile.Result, error) { - if controllerutil.ContainsFinalizer(virtualCluster, VirtualClusterControllerFinalizer) { - return reconcile.Result{}, nil - } - current := &v1alpha1.VirtualCluster{} - if err := c.Client.Get(context.TODO(), types.NamespacedName{ - Namespace: virtualCluster.Namespace, - Name: virtualCluster.Name, - }, current); err != nil { - klog.Errorf("get virtualcluster %s error. %v", virtualCluster.Name, err) - return reconcile.Result{Requeue: true}, err - } - - updated := current.DeepCopy() - controllerutil.AddFinalizer(updated, VirtualClusterControllerFinalizer) - err := c.Client.Update(context.TODO(), updated) - if err != nil { - klog.Errorf("update virtualcluster %s error. 
%v", virtualCluster.Name, err) - klog.Errorf("Failed to add finalizer to VirtualCluster %s/%s: %v", virtualCluster.Namespace, virtualCluster.Name, err) - return reconcile.Result{Requeue: true}, err - } - - return reconcile.Result{}, nil -} - -func (c *VirtualClusterInitController) removeFinalizer(virtualCluster *v1alpha1.VirtualCluster) (reconcile.Result, error) { - if !controllerutil.ContainsFinalizer(virtualCluster, VirtualClusterControllerFinalizer) { - return reconcile.Result{}, nil - } - - current := &v1alpha1.VirtualCluster{} - if err := c.Client.Get(context.TODO(), types.NamespacedName{ - Namespace: virtualCluster.Namespace, - Name: virtualCluster.Name, - }, current); err != nil { - klog.Errorf("get virtualcluster %s error. %v", virtualCluster.Name, err) - return reconcile.Result{Requeue: true}, err - } - updated := current.DeepCopy() - - controllerutil.RemoveFinalizer(updated, VirtualClusterControllerFinalizer) - err := c.Client.Update(context.TODO(), updated) - if err != nil { - klog.Errorf("Failed to remove finalizer to VirtualCluster %s/%s: %v", virtualCluster.Namespace, virtualCluster.Name, err) - return reconcile.Result{Requeue: true}, err - } - - return reconcile.Result{}, nil -} - -// nolint:revive -// createVirtualCluster assign work nodes, create control plane and create compoennts from manifests -func (c *VirtualClusterInitController) createVirtualCluster(virtualCluster *v1alpha1.VirtualCluster, kubeNestOptions *v1alpha1.KubeNestConfiguration) error { - klog.V(2).Infof("Reconciling virtual cluster", "name", virtualCluster.Name) - - //Assign host port - _, err := c.AllocateHostPort(virtualCluster, kubeNestOptions) - if err != nil { - return errors.Wrap(err, "Error in assign host port!") - } - // check if enable vip - vipPool, err := GetVipFromConfigMap(c.RootClientSet, constants.KosmosNs, constants.VipPoolConfigMapName, constants.VipPoolKey) - if err == nil && vipPool != nil && len(vipPool.Vips) > 0 { - klog.V(2).Infof("Enable vip for virtual cluster %s", virtualCluster.Name) - //Allocate vip - err = c.AllocateVip(virtualCluster, vipPool) - if err != nil { - return errors.Wrap(err, "Error in allocate vip!") - } - } - - executer, err := NewExecutor(virtualCluster, c.Client, c.Config, kubeNestOptions) - if err != nil { - return err - } - err = c.assignWorkNodes(virtualCluster) - if err != nil { - return errors.Wrap(err, "Error in assign work nodes") - } - klog.V(2).Infof("Successfully assigned work node for virtual cluster %s", virtualCluster.Name) - getKubeconfig := func() (string, error) { - secretName := fmt.Sprintf("%s-%s", virtualCluster.GetName(), constants.AdminConfig) - secret, err := c.RootClientSet.CoreV1().Secrets(virtualCluster.GetNamespace()).Get(context.TODO(), secretName, metav1.GetOptions{}) - if err != nil { - return "", errors.Wrapf(err, "Failed to get secret %s for virtual cluster %s", secretName, virtualCluster.GetName()) - } - return base64.StdEncoding.EncodeToString(secret.Data[constants.KubeConfig]), nil - } - err = executer.Execute() - if err != nil { - virtualCluster.Spec.Kubeconfig, _ = getKubeconfig() - return err - } - virtualCluster.Spec.Kubeconfig, err = getKubeconfig() - return err -} - -func (c *VirtualClusterInitController) destroyVirtualCluster(virtualCluster *v1alpha1.VirtualCluster) error { - klog.V(2).Infof("Destroying virtual cluster %s", virtualCluster.Name) - execute, err := NewExecutor(virtualCluster, c.Client, c.Config, c.KubeNestOptions) - if err != nil { - return err - } - return execute.Execute() -} - -func (c 
*VirtualClusterInitController) assignWorkNodes(virtualCluster *v1alpha1.VirtualCluster) error { - c.lock.Lock() - defer c.lock.Unlock() - globalNodeList, err := c.KosmosClient.KosmosV1alpha1().GlobalNodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return fmt.Errorf("list global nodes: %w", err) - } - allNodeInfos := make([]v1alpha1.NodeInfo, 0) - globalNodes := globalNodeList.Items - sort.Slice(globalNodes, func(i, j int) bool { - return globalNodes[i].Name < globalNodes[j].Name - }) - for _, policy := range virtualCluster.Spec.PromotePolicies { - globalNodes, err := retrieveGlobalNodesWithLabelSelector(globalNodeList.Items, policy.LabelSelector) - if err != nil { - return fmt.Errorf("retrieve globalnode with labelselector: %w", err) - } - sort.Slice(globalNodes, func(i, j int) bool { - return globalNodes[i].Name < globalNodes[j].Name - }) - klog.V(4).Infof("LabelSelected Globalnode count %d", len(globalNodes)) - nodeInfos, err := c.assignNodesByPolicy(virtualCluster, policy, globalNodes) - if err != nil { - return fmt.Errorf("assign nodes by policy: %w", err) - } - allNodeInfos = append(allNodeInfos, nodeInfos...) - } - - // set all node status in usage - for _, nodeInfo := range allNodeInfos { - globalNode, ok := util.FindGlobalNode(nodeInfo.NodeName, globalNodeList.Items) - if !ok { - return fmt.Errorf("assigned node %s doesn't exist in globalnode list. this should not happen normally", nodeInfo.NodeName) - } - - // only new assigned nodes' status is not `InUse` - if globalNode.Spec.State != v1alpha1.NodeInUse { - // Note. Although we tried hard to make sure update globalNode successful in func `setGlobalNodeUsageStatus`. - // But in case of failure, some dirty data will occur because of some globalNodes have been marked `InUse`. - // But virutalcluster's NodeInfos have not been updated yet. - err = c.setGlobalNodeUsageStatus(virtualCluster, globalNode) - if err != nil { - return fmt.Errorf("set globalnode %s InUse error. %v", globalNode.Name, err) - } - - // Preventive programming. Sometimes promotePolicies may not be well-designed,not absolutely non-overlapping. - // this may lead to multiple same node in `allNodeInfos`. 
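assignWorkNodes above filters GlobalNodes per promote policy via retrieveGlobalNodesWithLabelSelector (defined later in this file) before picking free nodes; the filtering reduces to converting the policy's metav1.LabelSelector and matching each node's labels. A sketch with plain label maps standing in for GlobalNode specs:

package controller

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// filterByLabelSelector keeps the label sets matched by selector; a nil
// selector matches everything, mirroring the helpers in this file.
func filterByLabelSelector(nodeLabels []map[string]string, selector *metav1.LabelSelector) ([]map[string]string, error) {
	if selector == nil {
		return nodeLabels, nil
	}
	sel, err := metav1.LabelSelectorAsSelector(selector)
	if err != nil {
		return nil, err
	}
	matched := make([]map[string]string, 0)
	for _, l := range nodeLabels {
		if sel.Matches(labels.Set(l)) {
			matched = append(matched, l)
		}
	}
	return matched, nil
}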
- globalNode.Spec.State = v1alpha1.NodeInUse - } - } - virtualCluster.Spec.PromoteResources.NodeInfos = allNodeInfos - return nil -} - -func (c *VirtualClusterInitController) checkPromotePoliciesChanged(virtualCluster *v1alpha1.VirtualCluster) (bool, error) { - globalNodeList, err := c.KosmosClient.KosmosV1alpha1().GlobalNodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return false, fmt.Errorf("list global nodes: %w", err) - } - for _, policy := range virtualCluster.Spec.PromotePolicies { - globalNodes, err := retrieveGlobalNodesWithLabelSelector(globalNodeList.Items, policy.LabelSelector) - if err != nil { - return false, fmt.Errorf("retrieve globalnode with labelselector: %w", err) - } - nodesAssigned, err := retrieveAssignedNodesByPolicy(virtualCluster, globalNodes) - if err != nil { - return false, errors.Wrapf(err, "Parse assigned nodes by policy %s error", policy.LabelSelector.String()) - } - if policy.NodeCount != int32(len(nodesAssigned)) { - klog.V(2).Infof("Promote policy node count changed from %d to %d", len(nodesAssigned), policy.NodeCount) - return true, nil - } - } - return false, nil -} - -func IsLabelsMatchSelector(selector *metav1.LabelSelector, targetLabels labels.Set) (match bool, err error) { - if selector == nil { - return true, nil - } - sel, err := metav1.LabelSelectorAsSelector(selector) - if err != nil { - return false, err - } - - match = sel.Matches(targetLabels) - return match, nil -} - -// nodesChangeCalculate calculate nodes changed when update virtualcluster. -func (c *VirtualClusterInitController) assignNodesByPolicy(virtualCluster *v1alpha1.VirtualCluster, policy v1alpha1.PromotePolicy, policyMatchedGlobalNodes []v1alpha1.GlobalNode) ([]v1alpha1.NodeInfo, error) { - nodesAssigned, err := retrieveAssignedNodesByPolicy(virtualCluster, policyMatchedGlobalNodes) - if err != nil { - return nil, fmt.Errorf("parse assigned nodes by policy %v error", policy.LabelSelector) - } - - requestNodesChanged := policy.NodeCount - int32(len(nodesAssigned)) - if requestNodesChanged == 0 { - klog.V(2).Infof("Nothing to do for policy %v", policy.LabelSelector) - return nodesAssigned, nil - } else if requestNodesChanged > 0 { - // nodes needs to increase - klog.V(2).Infof("Try allocate %d nodes for policy %v", requestNodesChanged, policy.LabelSelector) - var newAssignNodesIndex []int - for i, globalNode := range policyMatchedGlobalNodes { - if globalNode.Spec.State == v1alpha1.NodeFreeState { - newAssignNodesIndex = append(newAssignNodesIndex, i) - } - if int32(len(newAssignNodesIndex)) == requestNodesChanged { - break - } - } - if int32(len(newAssignNodesIndex)) < requestNodesChanged { - return nodesAssigned, errors.Errorf("There is not enough work nodes for promotepolicy %v. Desired %d, matched %d", policy.LabelSelector, requestNodesChanged, len(newAssignNodesIndex)) - } - for _, index := range newAssignNodesIndex { - klog.V(2).Infof("Assign node %s for virtualcluster %s policy %v", policyMatchedGlobalNodes[index].Name, virtualCluster.GetName(), policy.LabelSelector) - nodesAssigned = append(nodesAssigned, v1alpha1.NodeInfo{ - NodeName: policyMatchedGlobalNodes[index].Name, - }) - } - } else { - // nodes needs to decrease - klog.V(2).Infof("Try decrease nodes %d for policy %v", -requestNodesChanged, policy.LabelSelector) - decrease := int(-requestNodesChanged) - if len(nodesAssigned) < decrease { - return nil, errors.Errorf("Illegal work nodes decrease operation for promotepolicy %v. 
Desired %d, matched %d", policy.LabelSelector, decrease, len(nodesAssigned)) - } - nodesAssigned = nodesAssigned[:len(nodesAssigned)-decrease] - // note: node pool will not be modified here. NodeController will modify it when node delete success - } - return nodesAssigned, nil -} - -// retrieveAssignedNodesByPolicy retrieve nodes assigned by policy from virtual cluster spec. -// Note: this function only retrieves nodes that match the policy's label selector. -func retrieveAssignedNodesByPolicy(virtualCluster *v1alpha1.VirtualCluster, policyMatchedGlobalNodes []v1alpha1.GlobalNode) ([]v1alpha1.NodeInfo, error) { - var nodesAssignedMatchedPolicy []v1alpha1.NodeInfo - for _, nodeInfo := range virtualCluster.Spec.PromoteResources.NodeInfos { - if _, ok := util.FindGlobalNode(nodeInfo.NodeName, policyMatchedGlobalNodes); ok { - nodesAssignedMatchedPolicy = append(nodesAssignedMatchedPolicy, nodeInfo) - } - } - return nodesAssignedMatchedPolicy, nil -} - -func matchesWithLabelSelector(metaLabels labels.Set, labelSelector *metav1.LabelSelector) (bool, error) { - if labelSelector == nil { - return true, nil - } - - sel, err := metav1.LabelSelectorAsSelector(labelSelector) - if err != nil { - return false, err - } - - match := sel.Matches(metaLabels) - return match, nil -} - -func retrieveGlobalNodesWithLabelSelector(nodes []v1alpha1.GlobalNode, labelSelector *metav1.LabelSelector) ([]v1alpha1.GlobalNode, error) { - matchedNodes := make([]v1alpha1.GlobalNode, 0) - for _, node := range nodes { - matched, err := matchesWithLabelSelector(node.Spec.Labels, labelSelector) - if err != nil { - return nil, err - } - if matched { - matchedNodes = append(matchedNodes, node) - } - } - return matchedNodes, nil -} - -func (c *VirtualClusterInitController) setGlobalNodeUsageStatus(virtualCluster *v1alpha1.VirtualCluster, node *v1alpha1.GlobalNode) error { - updateSpecFunc := func() error { - current, err := c.KosmosClient.KosmosV1alpha1().GlobalNodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.Errorf("globalnode %s not found. This should not happen normally", node.Name) - // 如果节点不存在,则不执行更新并返回nil - return nil - } - return fmt.Errorf("failed to get globalNode %s: %v", node.Name, err) - } - - updated := current.DeepCopy() - updated.Spec.State = v1alpha1.NodeInUse - _, err = c.KosmosClient.KosmosV1alpha1().GlobalNodes().Update(context.TODO(), updated, metav1.UpdateOptions{}) - if err != nil { - if apierrors.IsConflict(err) { - return err - } - - klog.Errorf("failed to update globalNode spec for %s: %v", updated.Name, err) - return err - } - return nil - } - - if err := retry.RetryOnConflict(retry.DefaultRetry, updateSpecFunc); err != nil { - return err - } - - updateStatusFunc := func() error { - current, err := c.KosmosClient.KosmosV1alpha1().GlobalNodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.Errorf("globalnode %s not found. 
This should not happen normally", node.Name) - return nil - } - return fmt.Errorf("failed to get globalNode %s: %v", node.Name, err) - } - - updated := current.DeepCopy() - updated.Status.VirtualCluster = virtualCluster.Name - _, err = c.KosmosClient.KosmosV1alpha1().GlobalNodes().UpdateStatus(context.TODO(), updated, metav1.UpdateOptions{}) - if err != nil { - if apierrors.IsConflict(err) { - return err - } - - klog.Errorf("failed to update globalNode status for %s: %v", updated.Name, err) - return err - } - return nil - } - - return retry.RetryOnConflict(retry.DefaultRetry, updateStatusFunc) -} - -func (c *VirtualClusterInitController) ensureAllPodsRunning(virtualCluster *v1alpha1.VirtualCluster, timeout time.Duration) error { - secret, err := c.RootClientSet.CoreV1().Secrets(virtualCluster.GetNamespace()).Get(context.TODO(), - fmt.Sprintf("%s-%s", virtualCluster.GetName(), constants.AdminConfig), metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return err - } - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return err - } - - namespaceList, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "List namespaces error") - } - endTime := time.Now().Second() + int(timeout.Seconds()) - for _, namespace := range namespaceList.Items { - startTime := time.Now().Second() - if startTime > endTime { - return errors.New("Timeout waiting for all pods running") - } - klog.V(2).Infof("Check if all pods ready in namespace %s", namespace.Name) - err := wait.PollWithContext(context.TODO(), 5*time.Second, time.Duration(endTime-startTime)*time.Second, func(ctx context.Context) (done bool, err error) { - klog.V(2).Infof("Check if virtualcluster %s all deployments ready in namespace %s", virtualCluster.Name, namespace.Name) - deployList, err := clientset.AppsV1().Deployments(namespace.Name).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, errors.Wrapf(err, "Get deployment list in namespace %s error", namespace.Name) - } - for _, deploy := range deployList.Items { - if deploy.Status.AvailableReplicas != deploy.Status.Replicas { - klog.V(2).Infof("Deployment %s/%s is not ready yet. Available replicas: %d, Desired: %d. Waiting...", deploy.Name, namespace.Name, deploy.Status.AvailableReplicas, deploy.Status.Replicas) - return false, nil - } - } - - klog.V(2).Infof("Check if virtualcluster %s all statefulset ready in namespace %s", virtualCluster.Name, namespace.Name) - stsList, err := clientset.AppsV1().StatefulSets(namespace.Name).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, errors.Wrapf(err, "Get statefulset list in namespace %s error", namespace.Name) - } - for _, sts := range stsList.Items { - if sts.Status.AvailableReplicas != sts.Status.Replicas { - klog.V(2).Infof("Statefulset %s/%s is not ready yet. Available replicas: %d, Desired: %d. 
Waiting...", sts.Name, namespace.Name, sts.Status.AvailableReplicas, sts.Status.Replicas) - return false, nil - } - } - - klog.V(2).Infof("Check if virtualcluster %s all daemonset ready in namespace %s", virtualCluster.Name, namespace.Name) - damonsetList, err := clientset.AppsV1().DaemonSets(namespace.Name).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, errors.Wrapf(err, "Get daemonset list in namespace %s error", namespace.Name) - } - for _, daemonset := range damonsetList.Items { - if daemonset.Status.CurrentNumberScheduled != daemonset.Status.NumberReady { - klog.V(2).Infof("Daemonset %s/%s is not ready yet. Scheduled replicas: %d, Ready: %d. Waiting...", daemonset.Name, namespace.Name, daemonset.Status.CurrentNumberScheduled, daemonset.Status.NumberReady) - return false, nil - } - } - - return true, nil - }) - if err != nil { - return err - } - } - return nil -} - -func GetHostPortPoolFromConfigMap(client kubernetes.Interface, ns, cmName, dataKey string) (*HostPortPool, error) { - hostPorts, err := client.CoreV1().ConfigMaps(ns).Get(context.TODO(), cmName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - yamlData, exist := hostPorts.Data[dataKey] - if !exist { - return nil, fmt.Errorf("key '%s' not found in ConfigMap '%s'", dataKey, cmName) - } - - var hostPool HostPortPool - if err := yaml.Unmarshal([]byte(yamlData), &hostPool); err != nil { - return nil, err - } - - return &hostPool, nil -} - -func GetVipFromConfigMap(client kubernetes.Interface, ns, cmName, key string) (*VipPool, error) { - vipPoolCm, err := client.CoreV1().ConfigMaps(ns).Get(context.TODO(), cmName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - yamlData, exist := vipPoolCm.Data[key] - if !exist { - return nil, fmt.Errorf("key '%s' not found in vip pool ConfigMap '%s'", key, cmName) - } - - var vipPool VipPool - if err := yaml.Unmarshal([]byte(yamlData), &vipPool); err != nil { - return nil, err - } - - return &vipPool, nil -} - -// Return false to indicate that the port is not occupied -func (c *VirtualClusterInitController) isPortAllocated(port int32, hostAddress []string) bool { - vcList := &v1alpha1.VirtualClusterList{} - err := c.List(context.Background(), vcList) - if err != nil { - klog.Errorf("list virtual cluster error: %v", err) - return true - } - - for _, vc := range vcList.Items { - // 判断一个map是否包含某个端口 - contains := func(port int32) bool { - for _, p := range vc.Status.PortMap { - if p == port { - return true - } - } - return false - } - if vc.Status.Port == port || contains(port) { - return true - } - } - - ret, err := checkPortOnHostWithAddresses(port, hostAddress) - if err != nil { - klog.Errorf("check port on host error: %v", err) - return true - } - return ret -} - -// Return false to indicate that the port is not occupied -func checkPortOnHostWithAddresses(port int32, hostAddress []string) (bool, error) { - for _, addr := range hostAddress { - flag, err := CheckPortOnHost(addr, port) - if err != nil { - return false, err - } - if flag { - return true, nil - } - } - return false, nil -} - -// Return false to indicate that the port is not occupied -func CheckPortOnHost(addr string, port int32) (bool, error) { - hostExectorHelper := exector.NewExectorHelper(addr, "") - checkCmd := &exector.CheckExector{ - Port: fmt.Sprintf("%d", port), - } - - var ret *exector.ExectorReturn - err := apiclient.TryRunCommand(func() error { - ret = hostExectorHelper.DoExector(context.TODO().Done(), checkCmd) - if ret.Code != 1000 { - return fmt.Errorf("chekc port 
failed, err: %s", ret.String()) - } - return nil - }, 3) - - if err != nil { - klog.Errorf("check port on host error! addr:%s, port %d, err: %s", addr, port, err.Error()) - return true, err - } - - if ret.Status != exector.SUCCESS { - return true, fmt.Errorf("pod[%d] is occupied", port) - } - return false, nil -} - -func (c *VirtualClusterInitController) findHostAddresses() ([]string, error) { - nodes, err := c.RootClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ - LabelSelector: env.GetControlPlaneLabel(), - }) - if err != nil { - return nil, err - } - - ret := []string{} - - for _, node := range nodes.Items { - addr, err := utils.FindFirstNodeIPAddress(node, constants.PreferredAddressType) - if err != nil { - return nil, err - } - - ret = append(ret, addr) - } - return ret, nil -} - -func (c *VirtualClusterInitController) GetHostPortNextFunc(_ *v1alpha1.VirtualCluster) (func() (int32, error), error) { - var hostPool *HostPortPool - var err error - type nextfunc func() (int32, error) - var next nextfunc - hostPool, err = GetHostPortPoolFromConfigMap(c.RootClientSet, constants.KosmosNs, constants.HostPortsCMName, constants.HostPortsCMDataName) - if err != nil { - klog.Errorf("get host port pool error: %v", err) - return nil, err - } - next = func() nextfunc { - i := 0 - return func() (int32, error) { - if i >= len(hostPool.PortsPool) { - return 0, fmt.Errorf("no available ports") - } - port := hostPool.PortsPool[i] - i++ - return port, nil - } - }() - // } - return next, nil -} - -func createAPIAnpAgentSvc(name, namespace string, nameMap map[string]int) *corev1.Service { - apiAnpAgentSvc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.GetKonnectivityAPIServerName(name), - Namespace: namespace, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeNodePort, - Ports: func() []corev1.ServicePort { - ret := []corev1.ServicePort{} - for k, v := range nameMap { - ret = append(ret, corev1.ServicePort{ - Port: 8080 + int32(v), - Protocol: corev1.ProtocolTCP, - TargetPort: intstr.IntOrString{ - IntVal: 8080 + int32(v), - }, - Name: k, - }) - } - return ret - }(), - }, - } - return apiAnpAgentSvc -} - -func (c *VirtualClusterInitController) GetNodePorts(client kubernetes.Interface, virtualCluster *v1alpha1.VirtualCluster) ([]int32, error) { - ports := make([]int32, 5) - ipFamilies := utils.IPFamilyGenerator(constants.APIServerServiceSubnet) - name := virtualCluster.GetName() - namespace := virtualCluster.GetNamespace() - apiSvc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.GetAPIServerName(name), - Namespace: namespace, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeNodePort, - Ports: []corev1.ServicePort{ - { - Port: 30007, // just for get node port - Protocol: corev1.ProtocolTCP, - TargetPort: intstr.IntOrString{ - IntVal: 8080, // just for get node port - }, - Name: "client", - }, - }, - IPFamilies: ipFamilies, - }, - } - err := util.CreateOrUpdateService(client, apiSvc) - if err != nil { - return nil, fmt.Errorf("can not create api svc for allocate port, error: %s", err) - } - - createdAPISvc, err := client.CoreV1().Services(namespace).Get(context.TODO(), apiSvc.GetName(), metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("can not get api svc for allocate port, error: %s", err) - } - nodePort := createdAPISvc.Spec.Ports[0].NodePort - ports[0] = nodePort - - apiAnpAgentSvc := createAPIAnpAgentSvc(name, namespace, nameMap) - err = util.CreateOrUpdateService(client, apiAnpAgentSvc) - if err != nil { - return 
nil, fmt.Errorf("can not create anp svc for allocate port, error: %s", err) - } - - createdAnpSvc, err := client.CoreV1().Services(namespace).Get(context.TODO(), apiAnpAgentSvc.GetName(), metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("can not get api svc for allocate port, error: %s", err) - } - - for _, port := range createdAnpSvc.Spec.Ports { - v, ok := nameMap[port.Name] - if ok { - ports[v] = port.NodePort - } else { - return nil, fmt.Errorf("can not get node port for %s", port.Name) - } - } - - return ports, nil -} - -func (c *VirtualClusterInitController) GetHostNetworkPorts(virtualCluster *v1alpha1.VirtualCluster) ([]int32, error) { - next, err := c.GetHostPortNextFunc(virtualCluster) - if err != nil { - return nil, err - } - - hostAddress, err := c.findHostAddresses() - if err != nil { - return nil, err - } - - // 检查是否手动指定了 APIServerPortKey 的端口号 - var specifiedAPIServerPort int32 - if virtualCluster.Spec.KubeInKubeConfig != nil && virtualCluster.Spec.KubeInKubeConfig.ExternalPort != 0 { - specifiedAPIServerPort = virtualCluster.Spec.KubeInKubeConfig.ExternalPort - klog.V(4).InfoS("APIServerPortKey specified manually", "port", specifiedAPIServerPort) - } - - // 保存最终的分配结果 - ports := make([]int32, 0) - - // 如果手动指定了 APIServerPortKey 的端口,先检查端口是否可用 - if specifiedAPIServerPort != 0 { - // 检查手动指定的端口是否已经被占用 - if !c.isPortAllocated(specifiedAPIServerPort, hostAddress) { - ports = append(ports, specifiedAPIServerPort) // 使用手动指定的端口 - } else { - // 如果指定的端口已经被占用,则返回错误 - klog.Errorf("Specified APIServerPortKey port %d is already allocated", specifiedAPIServerPort) - return nil, fmt.Errorf("specified APIServerPortKey port %d is already allocated", specifiedAPIServerPort) - } - } - - // 从端口池中继续分配剩余的端口(确保端口数量满足要求) - for p, err := next(); err == nil; p, err = next() { - // 检查生成的端口是否被占用 - if !c.isPortAllocated(p, hostAddress) { - ports = append(ports, p) - if len(ports) >= constants.VirtualClusterPortNum { - break // 分配到足够的端口后退出 - } - } - } - - // 检查分配的端口数量是否足够 - if len(ports) < constants.VirtualClusterPortNum { - klog.Errorf("No available ports to allocate, need %d, got %d", constants.VirtualClusterPortNum, len(ports)) - return nil, fmt.Errorf("no available ports to allocate, need %d, got %d", constants.VirtualClusterPortNum, len(ports)) - } - - return ports, nil -} - -// AllocateHostPort allocate host port for virtual cluster -// #nosec G602 -func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1.VirtualCluster, _ *v1alpha1.KubeNestConfiguration) (int32, error) { - c.lock.Lock() - defer c.lock.Unlock() - if len(virtualCluster.Status.PortMap) > 0 || virtualCluster.Status.Port != 0 { - return 0, nil - } - - var ports []int32 - var err error - - if virtualCluster.Spec.KubeInKubeConfig != nil && virtualCluster.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort { - ports, err = c.GetNodePorts(c.RootClientSet, virtualCluster) - } else { - ports, err = c.GetHostNetworkPorts(virtualCluster) - } - - if err != nil { - return 0, err - } - - if len(ports) < constants.VirtualClusterPortNum { - klog.Errorf("no available ports to allocate") - return 0, fmt.Errorf("no available ports to allocate") - } - virtualCluster.Status.PortMap = make(map[string]int32) - virtualCluster.Status.PortMap[constants.APIServerPortKey] = ports[0] - virtualCluster.Status.PortMap[constants.APIServerNetworkProxyAgentPortKey] = ports[1] - virtualCluster.Status.PortMap[constants.APIServerNetworkProxyServerPortKey] = ports[2] - 
virtualCluster.Status.PortMap[constants.APIServerNetworkProxyHealthPortKey] = ports[3] - virtualCluster.Status.PortMap[constants.APIServerNetworkProxyAdminPortKey] = ports[4] - - klog.V(4).InfoS("Successfully allocated virtual cluster ports", "allocate ports", ports, "vc ports", ports[:2]) - - return 0, err -} - -// AllocateVip allocates a vip for the virtual cluster -// nolint:revive -// #nosec G602 -func (c *VirtualClusterInitController) AllocateVip(virtualCluster *v1alpha1.VirtualCluster, vipPool *VipPool) error { - c.lock.Lock() - defer c.lock.Unlock() - if len(virtualCluster.Status.VipMap) > 0 { - return nil - } - klog.V(4).InfoS("get vip pool", "vipPool", vipPool) - externalVips := virtualCluster.Spec.KubeInKubeConfig.TenantEntrypoint.ExternalVips - // check if the specified vip is available - if len(externalVips) > 0 { - if ip, err := util.IsIPAvailable(externalVips, vipPool.Vips); err != nil { - klog.Errorf("check if specified vip is available error: %v", err) - return err - } else { - klog.V(4).InfoS("specified vip is available", "vip", ip) - virtualCluster.Status.VipMap = make(map[string]string) - virtualCluster.Status.VipMap[constants.VcVipStatusKey] = ip - return nil - } - } - vcList := &v1alpha1.VirtualClusterList{} - err := c.List(context.Background(), vcList) - if err != nil { - klog.Errorf("list virtual cluster error: %v", err) - return err - } - var allocatedVips []string - for _, vc := range vcList.Items { - for _, val := range vc.Status.VipMap { - allocatedVips = append(allocatedVips, val) - } - } - - vip, err := util.FindAvailableIP(vipPool.Vips, allocatedVips) - if err != nil { - klog.Errorf("find available vip error: %v", err) - return err - } - virtualCluster.Status.VipMap = make(map[string]string) - virtualCluster.Status.VipMap[constants.VcVipStatusKey] = vip - - return err -} - -func (c *VirtualClusterInitController) labelNode(client kubernetes.Interface) (reps int, err error) { - replicas := constants.VipKeepAlivedReplicas - nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return 0, fmt.Errorf("failed to list nodes, err: %w", err) - } - if len(nodes.Items) == 0 { - return 0, fmt.Errorf("no nodes found") - } - reps = replicas - // select up to replicas nodes - if replicas > len(nodes.Items) { - reps = len(nodes.Items) - } - randomIndex, err := util.SecureRandomInt(reps) - if err != nil { - klog.Errorf("failed to get random index for master node, err: %v", err) - return 0, err - } - // take the first reps nodes as candidates - subNodes := nodes.Items[:reps] - masterNode := nodes.Items[randomIndex] - - // label the selected nodes - for _, node := range subNodes { - currentNode := node - labels := currentNode.GetLabels() - if currentNode.Name == masterNode.Name { - // label master - labels[constants.VipKeepAlivedNodeRoleKey] = constants.VipKeepAlivedNodeRoleMaster - } else { - // label backup - labels[constants.VipKeepAlivedNodeRoleKey] = constants.VipKeepalivedNodeRoleBackup - } - labels[constants.VipKeepAlivedNodeLabelKey] = constants.VipKeepAlivedNodeLabelValue - - // update label - currentNode.SetLabels(labels) - _, err := client.CoreV1().Nodes().Update(context.TODO(), &currentNode, metav1.UpdateOptions{}) - if err != nil { - klog.V(2).Infof("Failed to update labels for node %s: %v", currentNode.Name, err) - return 0, err - } - klog.V(2).Infof("Successfully updated labels for node %s", currentNode.Name) - } - klog.V(2).InfoS("[vip] Successfully labeled all nodes") - return reps, nil -} diff --git a/pkg/kubenest/controller/virtualcluster_init_controller_test.go 
b/pkg/kubenest/controller/virtualcluster_init_controller_test.go deleted file mode 100644 index 0e3a3b436..000000000 --- a/pkg/kubenest/controller/virtualcluster_init_controller_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package controller - -import ( - "fmt" - "testing" -) - -func TestNextFunc(_ *testing.T) { - portsPool := []int32{1, 2, 3, 4, 5} - type nextfunc func() (int32, error) - next := func() nextfunc { - i := 0 - return func() (int32, error) { - if i >= len(portsPool) { - return 0, fmt.Errorf("no available ports") - } - port := portsPool[i] - i++ - return port, nil - } - }() - - for p, err := next(); err == nil; p, err = next() { - fmt.Printf("port: %d\n", p) - } -} - -func TestCreateApiAnpServer(t *testing.T) { - var name, namespace string - apiAnpAgentSvc := createAPIAnpAgentSvc(name, namespace, nameMap) - - if len(apiAnpAgentSvc.Spec.Ports) != 4 { - t.Fatalf("apiAnpAgentSvc.Spec.Ports len != 4") - } - ports := make([]int32, 5) - for _, port := range apiAnpAgentSvc.Spec.Ports { - v, ok := nameMap[port.Name] - if ok { - ports[v] = port.Port - } else { - t.Fatalf("can not get node port for %s", port.Name) - } - } - - if ports[1] != 8081 { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[0].Port != 8081") - } - - if ports[2] != 8082 { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[1].Port != 8082") - } - - if ports[3] != 8083 { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[2].Port != 8083") - } - - if ports[4] != 8084 { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[3].Port != 8084") - } -} diff --git a/pkg/kubenest/controlplane/apiserver.go b/pkg/kubenest/controlplane/apiserver.go deleted file mode 100644 index ef763fa5a..000000000 --- a/pkg/kubenest/controlplane/apiserver.go +++ /dev/null @@ -1,85 +0,0 @@ -package controlplane - -import ( - "fmt" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/util/yaml" - clientset "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/apiserver" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -func EnsureVirtualClusterAPIServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - if err := installAPIServer(client, name, namespace, portMap, kubeNestConfiguration, vc); err != nil { - return fmt.Errorf("failed to install virtual cluster apiserver, err: %w", err) - } - return nil -} - -func DeleteVirtualClusterAPIServer(client clientset.Interface, name, namespace string) error { - deployName := util.GetAPIServerName(name) - if err := util.DeleteDeployment(client, deployName, namespace); err != nil { - return errors.Wrapf(err, "Failed to delete deployment %s/%s", deployName, namespace) - } - return nil -} - -func installAPIServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - imageRepository, imageVersion := util.GetImageMessage() - clusterIP, err := util.GetEtcdServiceClusterIP(namespace, name+constants.EtcdSuffix, client) - if err != nil { - return err - } - - vclabel := util.GetVirtualControllerLabel() - - IPV6FirstFlag, err := util.IPV6First(constants.APIServerServiceSubnet) - if err != nil { - return err - } - - apiserverDeploymentBytes, err := util.ParseTemplate(apiserver.ApiserverDeployment, struct { - DeploymentName, 
Namespace, ImageRepository, EtcdClientService, Version, VirtualControllerLabel string - ServiceSubnet, VirtualClusterCertsSecret, EtcdCertsSecret string - Replicas int - EtcdListenClientPort int32 - ClusterPort int32 - AdmissionPlugins bool - IPV6First bool - UseAPIServerNodePort bool - }{ - DeploymentName: util.GetAPIServerName(name), - Namespace: namespace, - ImageRepository: imageRepository, - Version: imageVersion, - VirtualControllerLabel: vclabel, - EtcdClientService: clusterIP, - ServiceSubnet: constants.APIServerServiceSubnet, - VirtualClusterCertsSecret: util.GetCertName(name), - EtcdCertsSecret: util.GetEtcdCertName(name), - Replicas: kubeNestConfiguration.KubeInKubeConfig.APIServerReplicas, - EtcdListenClientPort: constants.APIServerEtcdListenClientPort, - ClusterPort: portMap[constants.APIServerPortKey], - IPV6First: IPV6FirstFlag, - AdmissionPlugins: kubeNestConfiguration.KubeInKubeConfig.AdmissionPlugins, - UseAPIServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort, - }) - if err != nil { - return fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err) - } - - apiserverDeployment := &appsv1.Deployment{} - if err := yaml.Unmarshal([]byte(apiserverDeploymentBytes), apiserverDeployment); err != nil { - return fmt.Errorf("error when decoding virtual cluster apiserver deployment: %w", err) - } - - if err := util.CreateOrUpdateDeployment(client, apiserverDeployment); err != nil { - return fmt.Errorf("error when creating deployment for %s, err: %w", apiserverDeployment.Name, err) - } - return nil -} diff --git a/pkg/kubenest/controlplane/component.go b/pkg/kubenest/controlplane/component.go deleted file mode 100644 index ab7fb33ae..000000000 --- a/pkg/kubenest/controlplane/component.go +++ /dev/null @@ -1,205 +0,0 @@ -package controlplane - -import ( - "fmt" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/yaml" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - controller "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/kubecontroller" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/scheduler" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -func EnsureControlPlaneComponent(component, name, namespace string, client clientset.Interface, clusterCIDR string) error { - configMaps, err := getComponentConfigMapManifests(name, namespace) - if err != nil { - return err - } - configMap, ok := configMaps[constants.VirtualClusterSchedulerComponentConfigMap] - if !ok { - klog.Infof("Skip installing component configMap %s(%s/%s)", component, namespace, name) - return nil - } - - if err := util.CreateOrUpdateConfigMap(client, configMap); err != nil { - return fmt.Errorf("failed to create configMap resource for component %s, err: %w", component, err) - } - - deployments, err := getComponentManifests(name, namespace, clusterCIDR) - if err != nil { - return err - } - - deployment, ok := deployments[component] - if !ok { - klog.Infof("Skip installing component %s(%s/%s)", component, namespace, name) - return nil - } - - if err := util.CreateOrUpdateDeployment(client, deployment); err != nil { - return fmt.Errorf("failed to create deployment resource for component %s, err: %w", component, err) - } - - return nil -} - -func DeleteControlPlaneComponent(component, virtualclusterName, namespace string, client clientset.Interface) error 
{ - var deployName string - if component == constants.KubeControllerManagerComponent { - deployName = fmt.Sprintf("%s-%s", virtualclusterName, "kube-controller-manager") - } else if component == constants.VirtualClusterSchedulerComponent { - deployName = fmt.Sprintf("%s-%s", virtualclusterName, "virtualcluster-scheduler") - } else { - return errors.Errorf("Unknown deployment %s", component) - } - - if err := util.DeleteDeployment(client, deployName, namespace); err != nil { - return errors.Wrapf(err, "Failed to delete deployment %s/%s", deployName, namespace) - } - - configmaps := getComponentConfigmaps(component) - for _, configmap := range configmaps { - if err := util.DeleteConfigmap(client, configmap, namespace); err != nil { - return errors.Wrapf(err, "Failed to delete configmap %s/%s", configmap, namespace) - } - } - return nil -} - -func getComponentManifests(name, namespace, clusterCIDR string) (map[string]*appsv1.Deployment, error) { - kubeControllerManager, err := getKubeControllerManagerManifest(name, namespace, clusterCIDR) - if err != nil { - return nil, err - } - virtualClusterScheduler, err := getVirtualClusterSchedulerManifest(name, namespace) - if err != nil { - return nil, err - } - - manifest := map[string]*appsv1.Deployment{ - constants.KubeControllerManagerComponent: kubeControllerManager, - constants.VirtualClusterSchedulerComponent: virtualClusterScheduler, - } - - return manifest, nil -} - -func getComponentConfigMapManifests(name, namespace string) (map[string]*v1.ConfigMap, error) { - virtualClusterSchedulerConfigMap, err := getVirtualClusterSchedulerConfigMapManifest(name, namespace) - if err != nil { - return nil, err - } - - manifest := map[string]*v1.ConfigMap{ - constants.VirtualClusterSchedulerComponentConfigMap: virtualClusterSchedulerConfigMap, - } - - return manifest, nil -} - -// getComponentConfigmaps returns the configmap names of the given component -func getComponentConfigmaps(component string) []string { - if component == constants.VirtualClusterSchedulerComponent { - return []string{constants.VirtualClusterSchedulerComponentConfigMap} - } - return nil -} - -func getKubeControllerManagerManifest(name, namespace, clusterCIDR string) (*appsv1.Deployment, error) { - imageRepository, imageVersion := util.GetImageMessage() - - vclabel := util.GetVirtualControllerLabel() - - IPV6FirstFlag, err := util.IPV6First(constants.APIServerServiceSubnet) - if err != nil { - return nil, err - } - - podSubnet := constants.KubeControllerManagerPodSubnet - if len(clusterCIDR) > 0 { - podSubnet = clusterCIDR - } - - kubeControllerManagerBytes, err := util.ParseTemplate(controller.KubeControllerManagerDeployment, struct { - DeploymentName, Namespace, ImageRepository, Version, VirtualControllerLabel, PodSubnet string - VirtualClusterCertsSecret, KubeconfigSecret, ServiceSubnet string - Replicas int32 - IPV6First bool - }{ - DeploymentName: fmt.Sprintf("%s-%s", name, "kube-controller-manager"), - Namespace: namespace, - ImageRepository: imageRepository, - Version: imageVersion, - VirtualControllerLabel: vclabel, - VirtualClusterCertsSecret: util.GetCertName(name), - KubeconfigSecret: util.GetAdminConfigClusterIPSecretName(name), - ServiceSubnet: constants.APIServerServiceSubnet, - PodSubnet: podSubnet, - Replicas: constants.KubeControllerReplicas, - IPV6First: IPV6FirstFlag, - }) - if err != nil { - return nil, fmt.Errorf("error when parsing kube-controller-manager deployment template: %w", err) - } - - kcm := &appsv1.Deployment{} - if err := 
yaml.Unmarshal([]byte(kubeControllerManagerBytes), kcm); err != nil { - return nil, fmt.Errorf("err when decoding kube-controller-manager deployment: %w", err) - } - - return kcm, nil -} - -func getVirtualClusterSchedulerConfigMapManifest(name, namespace string) (*v1.ConfigMap, error) { - virtualClusterSchedulerConfigMapBytes, err := util.ParseTemplate(scheduler.VirtualClusterSchedulerConfigMap, struct { - DeploymentName, Namespace string - }{ - DeploymentName: fmt.Sprintf("%s-%s", name, "virtualcluster-scheduler"), - Namespace: namespace, - }) - if err != nil { - return nil, fmt.Errorf("error when parsing virtualCluster scheduler configMap template: %w", err) - } - - config := &v1.ConfigMap{} - if err := yaml.Unmarshal([]byte(virtualClusterSchedulerConfigMapBytes), config); err != nil { - return nil, fmt.Errorf("err when decoding virtualCluster-scheduler configMap: %w", err) - } - - return config, nil -} - -func getVirtualClusterSchedulerManifest(name, namespace string) (*appsv1.Deployment, error) { - imageRepository, imageVersion := util.GetImageMessage() - vclabel := util.GetVirtualControllerLabel() - virtualClusterSchedulerBytes, err := util.ParseTemplate(scheduler.VirtualClusterSchedulerDeployment, struct { - Replicas int32 - DeploymentName, Namespace, SystemNamespace, ImageRepository, Version, VirtualControllerLabel string - Image, KubeconfigSecret string - }{ - DeploymentName: fmt.Sprintf("%s-%s", name, "virtualcluster-scheduler"), - Namespace: namespace, - SystemNamespace: constants.SystemNs, - ImageRepository: imageRepository, - VirtualControllerLabel: vclabel, - Version: imageVersion, - KubeconfigSecret: util.GetAdminConfigClusterIPSecretName(name), - Replicas: constants.VirtualClusterSchedulerReplicas, - }) - if err != nil { - return nil, fmt.Errorf("error when parsing virtualCluster-scheduler deployment template: %w", err) - } - - deploy := &appsv1.Deployment{} - if err := yaml.Unmarshal([]byte(virtualClusterSchedulerBytes), deploy); err != nil { - return nil, fmt.Errorf("err when decoding virtualCluster-scheduler deployment: %w", err) - } - - return deploy, nil -} diff --git a/pkg/kubenest/controlplane/endpoint.go b/pkg/kubenest/controlplane/endpoint.go deleted file mode 100644 index 8f6758555..000000000 --- a/pkg/kubenest/controlplane/endpoint.go +++ /dev/null @@ -1,203 +0,0 @@ -package controlplane - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/common" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/virtualcluster" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -func EnsureAPIServerExternalEndPoint(kubeClient kubernetes.Interface, apiServerExternalResource common.APIServerExternalResource) error { - err := EnsureKosmosSystemNamespace(kubeClient) - if err != nil { - return err - } - - err = CreateOrUpdateAPIServerExternalEndpoint(kubeClient, apiServerExternalResource) - if err != nil { - return err - } - - err = CreateOrUpdateAPIServerExternalService(kubeClient) - if err != nil { - return err - } - return nil -} - -func CreateOrUpdateAPIServerExternalEndpoint(kubeClient kubernetes.Interface, apiServerExternalResource common.APIServerExternalResource) error { - klog.V(4).Info("begin to create or update 
api-server-external-service endpoint") - nodes, err := util.GetAPIServerNodes(apiServerExternalResource.RootClientSet, apiServerExternalResource.Namespace) - if err != nil { - return fmt.Errorf("failed to get API server nodes: %w", err) - } - if len(nodes.Items) == 0 { - return fmt.Errorf("no API server nodes found in the cluster") - } - - var addresses []corev1.EndpointAddress - for _, node := range nodes.Items { - klog.V(4).Infof("API server node: %s", node.Name) - for _, address := range node.Status.Addresses { - if address.Type == corev1.NodeInternalIP { - klog.V(4).Infof("Node internal IP: %s", address.Address) - addresses = append(addresses, corev1.EndpointAddress{ - IP: address.Address, - }) - } - } - } - - if len(addresses) == 0 { - return fmt.Errorf("no internal IP addresses found for the API server nodes") - } - - apiServerPort, ok := apiServerExternalResource.Vc.Status.PortMap[constants.APIServerPortKey] - if !ok { - return fmt.Errorf("failed to get API server port from VirtualCluster status") - } - klog.V(4).Infof("API server port: %d", apiServerPort) - - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - Namespace: constants.KosmosNs, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: addresses, - Ports: []corev1.EndpointPort{ - { - Name: "https", - Port: apiServerPort, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, - } - - _, err = kubeClient.CoreV1().Endpoints(constants.KosmosNs).Get(context.TODO(), constants.APIServerExternalService, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - _, err = kubeClient.CoreV1().Endpoints(constants.KosmosNs).Create(context.TODO(), endpoint, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to create api-server-external-service endpoint: %w", err) - } - klog.V(4).Info("api-server-external-service endpoint created successfully") - } else { - return fmt.Errorf("failed to get api-server-external-service endpoint: %w", err) - } - } else { - _, err = kubeClient.CoreV1().Endpoints(constants.KosmosNs).Update(context.TODO(), endpoint, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("failed to update api-server-external-service endpoint: %w", err) - } - klog.V(4).Info("api-server-external-service endpoint updated successfully") - } - - return nil -} - -func CreateOrUpdateAPIServerExternalService(kubeClient kubernetes.Interface) error { - port, ipFamilies, err := getEndPointInfo(kubeClient) - if err != nil { - return fmt.Errorf("error when getEndPointPort: %w", err) - } - apiServerExternalServiceBytes, err := util.ParseTemplate(virtualcluster.APIServerExternalService, struct { - ServicePort int32 - IPFamilies []corev1.IPFamily - }{ - ServicePort: port, - IPFamilies: ipFamilies, - }) - if err != nil { - return fmt.Errorf("error when parsing api-server-external-service template: %w", err) - } - - var svc corev1.Service - if err := yaml.Unmarshal([]byte(apiServerExternalServiceBytes), &svc); err != nil { - return fmt.Errorf("err when decoding api-server-external-service in virtual cluster: %w", err) - } - klog.V(4).Infof("create svc %s: %s", constants.APIServerExternalService, apiServerExternalServiceBytes) - _, err = kubeClient.CoreV1().Services(constants.KosmosNs).Get(context.TODO(), constants.APIServerExternalService, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - _, err = kubeClient.CoreV1().Services(constants.KosmosNs).Create(context.TODO(), &svc, metav1.CreateOptions{}) - if err != nil { - return 
fmt.Errorf("error when creating api-server-external-service: %w", err) - } - klog.V(4).Info("successfully created api-server-external-service service") - } else { - return fmt.Errorf("error when get api-server-external-service: %w", err) - } - } else { - _, err = kubeClient.CoreV1().Services(constants.KosmosNs).Update(context.TODO(), &svc, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("error when updating api-server-external-service: %w", err) - } - klog.V(4).Info("successfully updated api-server-external-service service") - } - - return nil -} - -func getEndPointInfo(kubeClient kubernetes.Interface) (int32, []corev1.IPFamily, error) { - klog.V(4).Info("begin to get Endpoints ports...") - ipFamilies := utils.IPFamilyGenerator(constants.APIServerServiceSubnet) - endpoints, err := kubeClient.CoreV1().Endpoints(constants.KosmosNs).Get(context.TODO(), constants.APIServerExternalService, metav1.GetOptions{}) - if err != nil { - klog.Errorf("get Endpoints failed: %v", err) - return 0, ipFamilies, err - } - - if len(endpoints.Subsets) == 0 { - klog.Errorf("subsets is empty") - return 0, ipFamilies, fmt.Errorf("No subsets found in the endpoints") - } - - subset := endpoints.Subsets[0] - - if len(subset.Ports) == 0 { - klog.Errorf("Port not found in the endpoint") - return 0, ipFamilies, fmt.Errorf("No ports found in the endpoint") - } - - port := subset.Ports[0].Port - klog.V(4).Infof("The port number was successfully obtained: %d", port) - return port, ipFamilies, nil -} - -func EnsureKosmosSystemNamespace(kubeClient kubernetes.Interface) error { - _, err := kubeClient.CoreV1().Namespaces().Get(context.Background(), constants.KosmosNs, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.KosmosNs, - }, - } - _, err = kubeClient.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to create kosmos-system namespace: %v", err) - } - klog.V(4).Info("Created kosmos-system namespace") - return nil - } - - return fmt.Errorf("failed to get kosmos-system namespace: %v", err) - } - - return nil -} diff --git a/pkg/kubenest/controlplane/endpoint_test.go b/pkg/kubenest/controlplane/endpoint_test.go deleted file mode 100644 index 36c9f9aa2..000000000 --- a/pkg/kubenest/controlplane/endpoint_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package controlplane - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - fakeclientset "k8s.io/client-go/kubernetes/fake" - k8stesting "k8s.io/client-go/testing" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -func TestEnsureKosmosSystemNamespace(t *testing.T) { - t.Run("Namespace exists", func(t *testing.T) { - client := fakeclientset.NewSimpleClientset(&corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.KosmosNs, - }, - }) - err := EnsureKosmosSystemNamespace(client) - assert.NoError(t, err, "Namespace already exists but failed") - }) - - t.Run("Namespace not exists and created successfully", func(t *testing.T) { - client := fakeclientset.NewSimpleClientset() - err := EnsureKosmosSystemNamespace(client) - assert.NoError(t, err, "Failed to create namespace") - }) - - t.Run("Error creating namespace", func(t *testing.T) { - client := fakeclientset.NewSimpleClientset() - client.PrependReactor("create", 
"namespaces", func(action k8stesting.Action) (bool, runtime.Object, error) { - return true, nil, fmt.Errorf("creation error") - }) - err := EnsureKosmosSystemNamespace(client) - assert.Error(t, err, "Expected error when creating namespace") - assert.EqualError(t, err, "failed to create kosmos-system namespace: creation error", "Error message mismatch") - }) -} - -func TestCreateOrUpdateAPIServerExternalService(t *testing.T) { - t.Run("Successfully create Service", func(t *testing.T) { - client := fakeclientset.NewSimpleClientset() - - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - Namespace: constants.KosmosNs, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - {IP: "192.168.1.2"}, - }, - Ports: []corev1.EndpointPort{ - {Port: 6443}, - }, - }, - }, - } - - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.KosmosNs, - }, - } - - _, err := client.CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{}) - assert.NoError(t, err) - - _, err = client.CoreV1().Endpoints(constants.KosmosNs).Create(context.TODO(), endpoint, metav1.CreateOptions{}) - assert.NoError(t, err) - - err = CreateOrUpdateAPIServerExternalService(client) - assert.NoError(t, err) - - svc, err := client.CoreV1().Services(constants.KosmosNs).Get(context.TODO(), constants.APIServerExternalService, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, svc) - assert.Equal(t, constants.APIServerExternalService, svc.Name) - assert.Equal(t, int32(6443), svc.Spec.Ports[0].Port) - }) - - t.Run("Error case - Endpoint not found", func(t *testing.T) { - client := fakeclientset.NewSimpleClientset() - err := CreateOrUpdateAPIServerExternalService(client) - assert.Error(t, err) - assert.Equal(t, "error when getEndPointPort: endpoints \"api-server-external-service\" not found", err.Error()) - }) -} - -func TestGetEndPointInfo(t *testing.T) { - t.Run("Successfully retrieve Endpoint info", func(t *testing.T) { - client := fakeclientset.NewSimpleClientset(&corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - Namespace: constants.KosmosNs, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - {IP: "192.168.1.1"}, - }, - Ports: []corev1.EndpointPort{ - {Port: 6443}, - }, - }, - }, - }) - port, ipFamilies, err := getEndPointInfo(client) - assert.NoError(t, err) - assert.Equal(t, int32(6443), port) - assert.Contains(t, ipFamilies, corev1.IPv4Protocol) - assert.NotContains(t, ipFamilies, corev1.IPv6Protocol) - }) - - t.Run("No subsets in endpoint", func(t *testing.T) { - client := fakeclientset.NewSimpleClientset(&corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: constants.APIServerExternalService, - Namespace: constants.KosmosNs, - }, - }) - _, _, err := getEndPointInfo(client) - assert.Error(t, err) - assert.Contains(t, err.Error(), "No subsets found in the endpoints") - }) -} diff --git a/pkg/kubenest/controlplane/etcd.go b/pkg/kubenest/controlplane/etcd.go deleted file mode 100644 index f43987605..000000000 --- a/pkg/kubenest/controlplane/etcd.go +++ /dev/null @@ -1,126 +0,0 @@ -package controlplane - -import ( - "fmt" - "strings" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/yaml" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/component-base/cli/flag" - "k8s.io/klog" - - 
"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/etcd" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -func EnsureVirtualClusterEtcd(client clientset.Interface, name, namespace string, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - return installEtcd(client, name, namespace, kubeNestConfiguration, vc) -} - -func DeleteVirtualClusterEtcd(client clientset.Interface, name, namespace string) error { - sts := util.GetEtcdServerName(name) - if err := util.DeleteStatefulSet(client, sts, namespace); err != nil { - return errors.Wrapf(err, "Failed to delete statefulset %s/%s", sts, namespace) - } - return nil -} - -// nolint -func installEtcd(client clientset.Interface, name, namespace string, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - imageRepository, imageVersion := util.GetImageMessage() - - var resourceQuantity resource.Quantity - var err error - - if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ETCDUnitSize != "" { - resourceQuantity, err = resource.ParseQuantity(vc.Spec.KubeInKubeConfig.ETCDUnitSize) - if err != nil { - klog.Errorf("Failed to parse etcdSize %s: %v", vc.Spec.KubeInKubeConfig.ETCDUnitSize, err) - return err - } - if resourceQuantity.Value() <= 0 { - klog.Errorf("Invalid vc.Spec.KubeInKubeConfig.ETCDUnitSize: must be greater than zero") - return err - } - resourceQuantity.Set(resourceQuantity.Value()) - } else { - nodeCount := getNodeCountFromPromotePolicy(vc) - resourceQuantity, err = resource.ParseQuantity(kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize) - if err != nil { - klog.Errorf("Failed to parse quantity %s: %v", kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize, err) - return err - } - resourceQuantity.Set(resourceQuantity.Value() * int64(nodeCount)) - } - - initialClusters := make([]string, constants.EtcdReplicas) - for index := range initialClusters { - memberName := fmt.Sprintf("%s-%d", util.GetEtcdServerName(name), index) - // build etcd member cluster peer url - memberPeerURL := fmt.Sprintf("http://%s.%s.%s.svc.cluster.local:%v", - memberName, - util.GetEtcdServerName(name), - namespace, - constants.EtcdListenPeerPort, - ) - initialClusters[index] = fmt.Sprintf("%s=%s", memberName, memberPeerURL) - } - vcLabel := util.GetVirtualControllerLabel() - IPV6FirstFlag, newErr := util.IPV6First(constants.APIServerServiceSubnet) - if newErr != nil { - return err - } - etcdStatefulSetBytes, err := util.ParseTemplate(etcd.EtcdStatefulSet, struct { - StatefulSetName, Namespace, ImageRepository, Image, EtcdClientService, Version, VirtualControllerLabel string - CertsSecretName, EtcdPeerServiceName string - InitialCluster, EtcdDataVolumeName, EtcdCipherSuites string - Replicas, EtcdListenClientPort, EtcdListenPeerPort int32 - ETCDStorageClass, ETCDStorageSize string - IPV6First bool - }{ - StatefulSetName: util.GetEtcdServerName(name), - Namespace: namespace, - ImageRepository: imageRepository, - Version: imageVersion, - VirtualControllerLabel: vcLabel, - EtcdClientService: util.GetEtcdClientServerName(name), - CertsSecretName: util.GetEtcdCertName(name), - EtcdPeerServiceName: util.GetEtcdServerName(name), - EtcdDataVolumeName: constants.EtcdDataVolumeName, - InitialCluster: strings.Join(initialClusters, ","), - EtcdCipherSuites: strings.Join(flag.PreferredTLSCipherNames(), ","), - Replicas: constants.EtcdReplicas, - 
EtcdListenClientPort: constants.EtcdListenClientPort, - EtcdListenPeerPort: constants.EtcdListenPeerPort, - ETCDStorageClass: kubeNestConfiguration.KubeInKubeConfig.ETCDStorageClass, - ETCDStorageSize: resourceQuantity.String(), - IPV6First: IPV6FirstFlag, - }) - if err != nil { - return fmt.Errorf("error when parsing Etcd statefulset template: %w", err) - } - - etcdStatefulSet := &appsv1.StatefulSet{} - if err := yaml.Unmarshal([]byte(etcdStatefulSetBytes), etcdStatefulSet); err != nil { - return fmt.Errorf("error when decoding Etcd StatefulSet: %w", err) - } - - if err := util.CreateOrUpdateStatefulSet(client, etcdStatefulSet); err != nil { - return fmt.Errorf("error when creating Etcd statefulset, err: %w", err) - } - - return nil -} - -func getNodeCountFromPromotePolicy(vc *v1alpha1.VirtualCluster) int32 { - var nodeCount int32 - for _, policy := range vc.Spec.PromotePolicies { - nodeCount = nodeCount + policy.NodeCount - } - return nodeCount -} diff --git a/pkg/kubenest/controlplane/proxy.go b/pkg/kubenest/controlplane/proxy.go deleted file mode 100644 index 657350266..000000000 --- a/pkg/kubenest/controlplane/proxy.go +++ /dev/null @@ -1,125 +0,0 @@ -package controlplane - -import ( - "fmt" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/yaml" - clientset "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/proxy" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -func EnsureVirtualClusterProxy(client clientset.Interface, kubeconfigString, clusterCIDR string) error { - // install kube-proxy ds in virtual cluster - if err := installProxyDaemonSet(client); err != nil { - return fmt.Errorf("failed to install virtual cluster proxy, err: %w", err) - } - - // install kube-proxy cm in virtual cluster - if err := installProxyConfigMap(client, kubeconfigString, clusterCIDR); err != nil { - return fmt.Errorf("failed to install virtual cluster proxy, err: %w", err) - } - - // install kube-proxy sa in virtual cluster - if err := installProxySA(client); err != nil { - return fmt.Errorf("failed to install virtual cluster proxy, err: %w", err) - } - return nil -} - -func DeleteVirtualClusterProxy(client clientset.Interface) error { - daemonSetName := fmt.Sprintf("%s-%s", "kube", "proxy") - daemonSetNameSpace := fmt.Sprintf("%s-%s", "kube", "system") - if err := util.DeleteDaemonSet(client, daemonSetName, daemonSetNameSpace); err != nil { - return errors.Wrapf(err, "Failed to delete daemonSet %s/%s", daemonSetName, daemonSetNameSpace) - } - - cmName := fmt.Sprintf("%s-%s", "kube", "proxy") - cmNameSpace := fmt.Sprintf("%s-%s", "kube", "system") - if err := util.DeleteConfigmap(client, cmName, cmNameSpace); err != nil { - return errors.Wrapf(err, "Failed to delete ConfigMap %s/%s", cmName, cmNameSpace) - } - - saName := fmt.Sprintf("%s-%s", "kube", "proxy") - saNameSpace := fmt.Sprintf("%s-%s", "kube", "system") - if err := util.DeleteServiceAccount(client, saName, saNameSpace); err != nil { - return errors.Wrapf(err, "Failed to delete ServiceAccount %s/%s", saName, saNameSpace) - } - return nil -} - -func installProxyDaemonSet(client clientset.Interface) error { - imageRepository, imageVersion := util.GetImageMessage() - - proxyDaemonSetBytes, err := util.ParseTemplate(proxy.ProxyDaemonSet, struct { - DaemonSetName, Namespace, ImageRepository, Version string - }{ - DaemonSetName: fmt.Sprintf("%s-%s", "kube", "proxy"), - Namespace: fmt.Sprintf("%s-%s", "kube", "system"), - 
ImageRepository: imageRepository, - Version: imageVersion, - }) - if err != nil { - return fmt.Errorf("error when parsing virtual cluster proxy daemonSet template: %w", err) - } - - proxyDaemonSet := &appsv1.DaemonSet{} - if err := yaml.Unmarshal([]byte(proxyDaemonSetBytes), proxyDaemonSet); err != nil { - return fmt.Errorf("error when decoding virtual cluster proxy daemonSet: %w", err) - } - - if err := util.CreateOrUpdateDaemonSet(client, proxyDaemonSet); err != nil { - return fmt.Errorf("error when creating daemonSet for %s, err: %w", proxyDaemonSet.Name, err) - } - return nil -} - -func installProxyConfigMap(client clientset.Interface, kubeconfigString, clusterCIDR string) error { - proxyConfigMapBytes, err := util.ParseTemplate(proxy.ProxyConfigMap, struct { - ConfigMapName, Namespace, KubeProxyKubeConfig, ClusterCIDR string - }{ - ConfigMapName: fmt.Sprintf("%s-%s", "kube", "proxy"), - Namespace: fmt.Sprintf("%s-%s", "kube", "system"), - KubeProxyKubeConfig: kubeconfigString, - ClusterCIDR: clusterCIDR, - }) - if err != nil { - return fmt.Errorf("error when parsing virtual cluster proxy configmap template: %w", err) - } - - proxyConfigMap := &corev1.ConfigMap{} - if err := yaml.Unmarshal([]byte(proxyConfigMapBytes), proxyConfigMap); err != nil { - return fmt.Errorf("error when decoding virtual cluster proxy configmap: %w", err) - } - - if err := util.CreateOrUpdateConfigMap(client, proxyConfigMap); err != nil { - return fmt.Errorf("error when creating configmap for %s, err: %w", proxyConfigMap.Name, err) - } - return nil -} - -func installProxySA(client clientset.Interface) error { - proxySABytes, err := util.ParseTemplate(proxy.ProxySA, struct { - SAName, Namespace string - }{ - SAName: fmt.Sprintf("%s-%s", "kube", "proxy"), - Namespace: fmt.Sprintf("%s-%s", "kube", "system"), - }) - if err != nil { - return fmt.Errorf("error when parsing virtual cluster proxy SA template: %w", err) - } - - proxySA := &corev1.ServiceAccount{} - if err := yaml.Unmarshal([]byte(proxySABytes), proxySA); err != nil { - return fmt.Errorf("error when decoding virtual cluster proxy SA: %w", err) - } - - if err := util.CreateOrUpdateServiceAccount(client, proxySA); err != nil { - return fmt.Errorf("error when creating SA for %s, err: %w", proxySA.Name, err) - } - return nil -} diff --git a/pkg/kubenest/controlplane/service.go b/pkg/kubenest/controlplane/service.go deleted file mode 100644 index ce1efcccc..000000000 --- a/pkg/kubenest/controlplane/service.go +++ /dev/null @@ -1,165 +0,0 @@ -package controlplane - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/apiserver" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/coredns/host" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/etcd" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -func EnsureVirtualClusterService(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestOpt *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - if err := createServerService(client, name, namespace, portMap, kubeNestOpt, vc); err != nil { 
- return fmt.Errorf("failed to create virtual cluster apiserver-service, err: %w", err) - } - return nil -} - -func DeleteVirtualClusterService(client clientset.Interface, name, namespace string) error { - services := []string{ - util.GetAPIServerName(name), - util.GetEtcdServerName(name), - util.GetEtcdClientServerName(name), - "kube-dns", - util.GetKonnectivityServerName(name), - util.GetKonnectivityAPIServerName(name), - } - for _, service := range services { - err := client.CoreV1().Services(namespace).Delete(context.TODO(), service, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("Service %s/%s not found, skip delete", service, namespace) - continue - } - return errors.Wrapf(err, "Failed to delete service %s/%s", service, namespace) - } - } - - klog.V(2).Infof("Successfully uninstalled service for virtualcluster %s", name) - return nil -} - -func createServerService(client clientset.Interface, name, namespace string, portMap map[string]int32, _ *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - ipFamilies := utils.IPFamilyGenerator(constants.APIServerServiceSubnet) - apiserverServiceBytes, err := util.ParseTemplate(apiserver.ApiserverService, struct { - ServiceName, Namespace, ServiceType string - ServicePort int32 - IPFamilies []corev1.IPFamily - UseAPIServerNodePort bool - }{ - ServiceName: util.GetAPIServerName(name), - Namespace: namespace, - ServiceType: constants.APIServerServiceType, - ServicePort: portMap[constants.APIServerPortKey], - IPFamilies: ipFamilies, - UseAPIServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort, - }) - if err != nil { - return fmt.Errorf("error when parsing virtualClusterApiserver serive template: %w", err) - } - anpServiceBytes, err := util.ParseTemplate(apiserver.ApiserverAnpService, struct { - ServiceName, Namespace string - ProxyServerPort int32 - }{ - ServiceName: util.GetKonnectivityServerName(name), - Namespace: namespace, - ProxyServerPort: portMap[constants.APIServerNetworkProxyServerPortKey], - }) - if err != nil { - return fmt.Errorf("error when parsing virtualClusterApiserver anp service template: %w", err) - } - - apiserverService := &corev1.Service{} - if err := yaml.Unmarshal([]byte(apiserverServiceBytes), apiserverService); err != nil { - return fmt.Errorf("error when decoding virtual cluster apiserver service: %w", err) - } - if err := util.CreateOrUpdateService(client, apiserverService); err != nil { - return fmt.Errorf("err when creating virtual cluster apiserver service for %s, err: %w", apiserverService.Name, err) - } - - anpService := &corev1.Service{} - if err := yaml.Unmarshal([]byte(anpServiceBytes), anpService); err != nil { - return fmt.Errorf("error when decoding virtual cluster anp service: %w", err) - } - if err := util.CreateOrUpdateService(client, anpService); err != nil { - return fmt.Errorf("err when creating virtual cluster anp service for %s, err: %w", anpService.Name, err) - } - - etcdServicePeerBytes, err := util.ParseTemplate(etcd.EtcdPeerService, struct { - ServiceName, Namespace string - EtcdListenClientPort, EtcdListenPeerPort int32 - }{ - ServiceName: util.GetEtcdServerName(name), - Namespace: namespace, - EtcdListenClientPort: constants.EtcdListenClientPort, - EtcdListenPeerPort: constants.EtcdListenPeerPort, - }) - if err != nil { - return fmt.Errorf("error when parsing Etcd client serive template: %w", err) - } - - etcdPeerService := &corev1.Service{} - if err := 
yaml.Unmarshal([]byte(etcdServicePeerBytes), etcdPeerService); err != nil { - return fmt.Errorf("error when decoding Etcd peer service: %w", err) - } - - if err := util.CreateOrUpdateService(client, etcdPeerService); err != nil { - return fmt.Errorf("error when creating etcd peer service, err: %w", err) - } - - //etcd-client service - etcdClientServiceBytes, err := util.ParseTemplate(etcd.EtcdClientService, struct { - ServiceName, Namespace string - EtcdListenClientPort int32 - }{ - ServiceName: util.GetEtcdClientServerName(name), - Namespace: namespace, - EtcdListenClientPort: constants.EtcdListenClientPort, - }) - if err != nil { - return fmt.Errorf("error when parsing Etcd client service template: %w", err) - } - - etcdClientService := &corev1.Service{} - if err := yaml.Unmarshal([]byte(etcdClientServiceBytes), etcdClientService); err != nil { - return fmt.Errorf("err when decoding Etcd client service: %w", err) - } - - if err := util.CreateOrUpdateService(client, etcdClientService); err != nil { - return fmt.Errorf("err when creating etcd client service, err: %w", err) - } - - //core-dns service - coreDNSServiceBytes, err := util.ParseTemplate(host.CoreDNSService, struct { - Namespace string - }{ - Namespace: namespace, - }) - if err != nil { - return fmt.Errorf("error when parsing core-dns service template: %w", err) - } - - coreDNSService := &corev1.Service{} - if err := yaml.Unmarshal([]byte(coreDNSServiceBytes), coreDNSService); err != nil { - return fmt.Errorf("err when decoding core-dns service: %w", err) - } - - if err := util.CreateOrUpdateService(client, coreDNSService); err != nil { - return fmt.Errorf("err when creating core-dns service, err: %w", err) - } - - return nil -} diff --git a/pkg/kubenest/init.go b/pkg/kubenest/init.go deleted file mode 100644 index 26f2a7129..000000000 --- a/pkg/kubenest/init.go +++ /dev/null @@ -1,291 +0,0 @@ -package kubenest - -import ( - "errors" - "fmt" - - utilerrors "k8s.io/apimachinery/pkg/util/errors" - utilversion "k8s.io/apimachinery/pkg/util/version" - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" - "github.com/kosmos.io/kosmos/pkg/kubenest/tasks" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/kubenest/util/cert" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -var _ tasks.InitData = &initData{} - -type initData struct { - cert.CertStore - name string - namespace string - virtualClusterVersion *utilversion.Version - controlplaneAddr string - clusterIps []string - remoteClient clientset.Interface - kosmosClient versioned.Interface - dynamicClient *dynamic.DynamicClient - virtualClusterDataDir string - privateRegistry string - externalIP string - externalIps []string - vipMap map[string]string - hostPort int32 - hostPortMap map[string]int32 - kubeNestOptions *v1alpha1.KubeNestConfiguration - virtualCluster *v1alpha1.VirtualCluster - ETCDStorageClass string - ETCDUnitSize string -} - -type InitOptions struct { - Name string - Namespace string - Kubeconfig *rest.Config - virtualClusterVersion string - virtualClusterDataDir string - virtualCluster *v1alpha1.VirtualCluster - KubeNestOptions *v1alpha1.KubeNestConfiguration -} - -func NewInitPhase(opts *InitOptions) *workflow.Phase { - initPhase := workflow.NewPhase() - - initPhase.AppendTask(tasks.NewVirtualClusterServiceTask()) 
- initPhase.AppendTask(tasks.NewCertTask()) - initPhase.AppendTask(tasks.NewUploadCertsTask()) - initPhase.AppendTask(tasks.NewEtcdTask()) - initPhase.AppendTask(tasks.NewVirtualClusterApiserverTask()) - initPhase.AppendTask(tasks.NewUploadKubeconfigTask()) - initPhase.AppendTask(tasks.NewCheckApiserverHealthTask()) - initPhase.AppendTask(tasks.NewComponentTask()) - initPhase.AppendTask(tasks.NewCheckControlPlaneTask()) - initPhase.AppendTask(tasks.NewAnpTask()) - // create proxy - //initPhase.AppendTask(tasks.NewVirtualClusterProxyTask()) - // create core-dns - initPhase.AppendTask(tasks.NewCoreDNSTask()) - // add components from manifests - initPhase.AppendTask(tasks.NewComponentsFromManifestsTask()) - initPhase.AppendTask(tasks.NewEndPointTask()) - - initPhase.SetDataInitializer(func() (workflow.RunData, error) { - return newRunData(opts) - }) - return initPhase -} - -func UninstallPhase(opts *InitOptions) *workflow.Phase { - destroyPhase := workflow.NewPhase() - destroyPhase.AppendTask(tasks.UninstallCoreDNSTask()) - destroyPhase.AppendTask(tasks.UninstallComponentTask()) - destroyPhase.AppendTask(tasks.UninstallVirtualClusterApiserverTask()) - destroyPhase.AppendTask(tasks.UninstallAnpTask()) - destroyPhase.AppendTask(tasks.UninstallEtcdTask()) - destroyPhase.AppendTask(tasks.UninstallVirtualClusterServiceTask()) - destroyPhase.AppendTask(tasks.UninstallCertsAndKubeconfigTask()) - destroyPhase.AppendTask(tasks.DeleteEtcdPvcTask()) - //destroyPhase.AppendTask(tasks.UninstallVirtualClusterProxyTask()) - - destroyPhase.SetDataInitializer(func() (workflow.RunData, error) { - return newRunData(opts) - }) - return destroyPhase -} - -type InitOpt func(o *InitOptions) - -func NewPhaseInitOptions(opts ...InitOpt) *InitOptions { - options := defaultJobInitOptions() - - for _, c := range opts { - c(options) - } - return options -} - -func defaultJobInitOptions() *InitOptions { - virtualCluster := &v1alpha1.VirtualCluster{} - return &InitOptions{ - virtualClusterVersion: "0.0.0", - virtualClusterDataDir: "var/lib/virtualCluster", - virtualCluster: virtualCluster, - } -} - -func NewInitOptWithVirtualCluster(virtualCluster *v1alpha1.VirtualCluster) InitOpt { - return func(o *InitOptions) { - o.virtualCluster = virtualCluster - o.Name = virtualCluster.GetName() - o.Namespace = virtualCluster.GetNamespace() - } -} - -func NewInitOptWithKubeconfig(config *rest.Config) InitOpt { - return func(o *InitOptions) { - o.Kubeconfig = config - } -} - -func NewInitOptWithKubeNestOptions(options *v1alpha1.KubeNestConfiguration) InitOpt { - return func(o *InitOptions) { - o.KubeNestOptions = options - } -} - -func newRunData(opt *InitOptions) (*initData, error) { - if err := opt.Validate(); err != nil { - return nil, err - } - - localClusterClient, err := clientset.NewForConfig(opt.Kubeconfig) - if err != nil { - return nil, fmt.Errorf("error when creating local cluster client, err: %w", err) - } - var remoteClient clientset.Interface = localClusterClient - - dynamicClient, err := dynamic.NewForConfig(opt.Kubeconfig) - if err != nil { - return nil, err - } - - kosmosClient, err := versioned.NewForConfig(opt.Kubeconfig) - if err != nil { - return nil, fmt.Errorf("error when creating kosmos client, err: %w", err) - } - - version, err := utilversion.ParseGeneric(opt.virtualClusterVersion) - if err != nil { - return nil, fmt.Errorf("invalid virtual cluster version %s", opt.virtualClusterVersion) - } - - var address string - address, err = util.GetAPIServiceIP(remoteClient) - if err != nil { - return nil,
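// Editor's note: newRunData fans a single rest.Config out into three clients (the
// typed clientset, a dynamic client, and the generated kosmos clientset) and then
// resolves a host-cluster node IP via util.GetAPIServiceIP, stored below as
// initData's controlplaneAddr.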
fmt.Errorf("failed to get a valid node IP for APIServer, err: %w", err) - } - var clusterIPs []string - clusterIP, err := util.GetAPIServiceClusterIP(opt.Namespace, remoteClient) - clusterIPs = append(clusterIPs, clusterIP) - if err != nil { - return nil, fmt.Errorf("failed to get APIServer Service-ClusterIp, err: %w", err) - } - return &initData{ - name: opt.Name, - namespace: opt.Namespace, - virtualClusterVersion: version, - controlplaneAddr: address, - clusterIps: clusterIPs, - remoteClient: remoteClient, - dynamicClient: dynamicClient, - kosmosClient: kosmosClient, - virtualClusterDataDir: opt.virtualClusterDataDir, - privateRegistry: utils.DefaultImageRepository, - CertStore: cert.NewCertStore(), - externalIP: opt.virtualCluster.Spec.ExternalIP, - externalIps: opt.virtualCluster.Spec.ExternalIps, - hostPort: opt.virtualCluster.Status.Port, - hostPortMap: opt.virtualCluster.Status.PortMap, - vipMap: opt.virtualCluster.Status.VipMap, - kubeNestOptions: opt.KubeNestOptions, - virtualCluster: opt.virtualCluster, - ETCDUnitSize: opt.KubeNestOptions.KubeInKubeConfig.ETCDUnitSize, - ETCDStorageClass: opt.KubeNestOptions.KubeInKubeConfig.ETCDStorageClass, - }, nil -} - -// TODO Add more detailed verification content -func (opt *InitOptions) Validate() error { - var errs []error - - if len(opt.Name) == 0 || len(opt.Namespace) == 0 { - return errors.New("unexpected empty name or namespace") - } - - _, err := utilversion.ParseGeneric(opt.virtualClusterVersion) - if err != nil { - return fmt.Errorf("unexpected virtual cluster invalid version %s", opt.virtualClusterVersion) - } - - return utilerrors.NewAggregate(errs) -} - -func (i initData) GetName() string { - return i.name -} - -func (i initData) GetNamespace() string { - return i.namespace -} - -func (i initData) ControlplaneAddress() string { - return i.controlplaneAddr -} - -func (i initData) ServiceClusterIP() []string { - clusterIps, err := util.GetServiceClusterIP(i.namespace, i.remoteClient) - if err != nil { - return nil - } - return clusterIps -} - -func (i initData) RemoteClient() clientset.Interface { - return i.remoteClient -} - -func (i initData) KosmosClient() versioned.Interface { - return i.kosmosClient -} - -func (i initData) DataDir() string { - return i.virtualClusterDataDir -} - -func (i initData) VirtualCluster() *v1alpha1.VirtualCluster { - return i.virtualCluster -} - -func (i initData) ExternalIP() string { - return i.externalIP -} - -func (i initData) ExternalIPs() []string { return i.externalIps } - -func (i initData) VipMap() map[string]string { - return i.vipMap -} -func (i initData) HostPort() int32 { - return i.hostPort -} - -func (i initData) HostPortMap() map[string]int32 { - return i.hostPortMap -} - -func (i initData) DynamicClient() *dynamic.DynamicClient { - return i.dynamicClient -} - -func (i initData) KubeNestOpt() *v1alpha1.KubeNestConfiguration { - return i.kubeNestOptions -} - -func (i initData) PluginOptions() map[string]string { - if i.virtualCluster.Spec.PluginOptions == nil { - return nil - } - - pluginOptoinsMapping := map[string]string{} - - for _, option := range i.virtualCluster.Spec.PluginOptions { - pluginOptoinsMapping[option.Name] = option.Value - } - return pluginOptoinsMapping -} diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go deleted file mode 100644 index 4d9d34257..000000000 --- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go +++ /dev/null @@ -1,581 +0,0 @@ 
-package apiserver - -const ( - ApiserverDeployment = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - virtualCluster-app: apiserver - app.kubernetes.io/managed-by: virtual-cluster-controller - name: {{ .DeploymentName }} - namespace: {{ .Namespace }} -spec: - replicas: {{ .Replicas }} - selector: - matchLabels: - virtualCluster-app: apiserver - template: - metadata: - labels: - virtualCluster-app: apiserver - spec: - automountServiceAccountToken: false - {{ if not .UseAPIServerNodePort }} - hostNetwork: true - {{ end }} - dnsPolicy: ClusterFirstWithHostNet - tolerations: - - key: {{ .VirtualControllerLabel }} - operator: "Exists" - effect: "NoSchedule" - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .VirtualControllerLabel }} - operator: Exists - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: virtualCluster-app - operator: In - values: - - apiserver - topologyKey: kubernetes.io/hostname - containers: - - name: kube-apiserver - image: {{ .ImageRepository }}/kube-apiserver:{{ .Version }} - imagePullPolicy: IfNotPresent - env: - {{ if .UseAPIServerNodePort }} - - name: HOSTIP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP - {{ else}} - - name: PODIP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - {{ end }} - command: - - kube-apiserver - - --allow-privileged=true - - --authorization-mode=Node,RBAC - - --client-ca-file=/etc/virtualcluster/pki/ca.crt - - --enable-admission-plugins=NodeRestriction - - --enable-bootstrap-token-auth=true - - --etcd-cafile=/etc/etcd/pki/etcd-ca.crt - - --etcd-certfile=/etc/etcd/pki/etcd-client.crt - - --etcd-keyfile=/etc/etcd/pki/etcd-client.key - #- --etcd-servers=https://{{ .EtcdClientService }}.{{ .Namespace }}.svc.cluster.local:{{ .EtcdListenClientPort }} - {{ if .IPV6First }} - - --etcd-servers=https://[{{ .EtcdClientService }}]:{{ .EtcdListenClientPort }} - {{ else }} - - --etcd-servers=https://{{ .EtcdClientService }}:{{ .EtcdListenClientPort }} - {{ end }} - - '--bind-address=::' - - --kubelet-client-certificate=/etc/virtualcluster/pki/virtualCluster.crt - - --kubelet-client-key=/etc/virtualcluster/pki/virtualCluster.key - - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - - --secure-port={{ .ClusterPort }} - - --service-account-issuer=https://kubernetes.default.svc.cluster.local - - --service-account-key-file=/etc/virtualcluster/pki/virtualCluster.key - - --service-account-signing-key-file=/etc/virtualcluster/pki/virtualCluster.key - - --service-cluster-ip-range={{ .ServiceSubnet }} - - --proxy-client-cert-file=/etc/virtualcluster/pki/front-proxy-client.crt - - --proxy-client-key-file=/etc/virtualcluster/pki/front-proxy-client.key - - --requestheader-allowed-names=front-proxy-client - - --requestheader-client-ca-file=/etc/virtualcluster/pki/front-proxy-ca.crt - - --requestheader-extra-headers-prefix=X-Remote-Extra- - - --requestheader-group-headers=X-Remote-Group - - --requestheader-username-headers=X-Remote-User - - --tls-cert-file=/etc/virtualcluster/pki/apiserver.crt - - --tls-private-key-file=/etc/virtualcluster/pki/apiserver.key - - --tls-min-version=VersionTLS13 - - --max-requests-inflight=1500 - - --max-mutating-requests-inflight=500 - - --v=4 - {{ if .UseAPIServerNodePort }} - - --advertise-address=$(HOSTIP) - {{ else }} - - --advertise-address=$(PODIP) - {{ end }} - {{ if not 
.AdmissionPlugins }} - - --disable-admission-plugins=License - {{ end }} - livenessProbe: - failureThreshold: 8 - httpGet: - path: /livez - port: {{ .ClusterPort }} - scheme: HTTPS - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 15 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /readyz - port: {{ .ClusterPort }} - scheme: HTTPS - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 15 - ports: - - containerPort: {{ .ClusterPort }} - name: http - protocol: TCP - volumeMounts: - - mountPath: /etc/virtualcluster/pki - name: apiserver-cert - readOnly: true - - mountPath: /etc/etcd/pki - name: etcd-cert - readOnly: true - priorityClassName: system-node-critical - volumes: - - name: apiserver-cert - secret: - secretName: {{ .VirtualClusterCertsSecret }} - - name: etcd-cert - secret: - secretName: {{ .EtcdCertsSecret }} -` - ApiserverAnpDeployment = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - virtualCluster-app: apiserver - virtualCluster-anp: apiserver-anp - app.kubernetes.io/managed-by: virtual-cluster-controller - name: {{ .DeploymentName }} - namespace: {{ .Namespace }} -spec: - replicas: {{ .Replicas }} - strategy: - type: Recreate - selector: - matchLabels: - virtualCluster-app: apiserver - template: - metadata: - labels: - virtualCluster-app: apiserver - virtualCluster-anp: apiserver-anp - spec: - automountServiceAccountToken: false - {{ if not .UseAPIServerNodePort }} - hostNetwork: true - {{ end }} - dnsPolicy: ClusterFirstWithHostNet - tolerations: - - key: {{ .VirtualControllerLabel }} - operator: "Exists" - effect: "NoSchedule" - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .VirtualControllerLabel }} - operator: Exists - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: virtualCluster-app - operator: In - values: - - apiserver - topologyKey: kubernetes.io/hostname - containers: - - name: kube-apiserver - image: {{ .ImageRepository }}/kube-apiserver:{{ .Version }} - imagePullPolicy: IfNotPresent - env: - {{ if .UseAPIServerNodePort }} - - name: HOSTIP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP - {{ else}} - - name: PODIP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - {{ end }} - command: - - kube-apiserver - - --allow-privileged=true - - --authorization-mode=Node,RBAC - - --client-ca-file=/etc/virtualcluster/pki/ca.crt - - --enable-admission-plugins=NodeRestriction - - --enable-bootstrap-token-auth=true - - --etcd-cafile=/etc/etcd/pki/etcd-ca.crt - - --etcd-certfile=/etc/etcd/pki/etcd-client.crt - - --etcd-keyfile=/etc/etcd/pki/etcd-client.key - #- --etcd-servers=https://{{ .EtcdClientService }}.{{ .Namespace }}.svc.cluster.local:{{ .EtcdListenClientPort }} - {{ if .IPV6First }} - - --etcd-servers=https://[{{ .EtcdClientService }}]:{{ .EtcdListenClientPort }} - {{ else }} - - --etcd-servers=https://{{ .EtcdClientService }}:{{ .EtcdListenClientPort }} - {{ end }} - - '--bind-address=::' - - --kubelet-client-certificate=/etc/virtualcluster/pki/virtualCluster.crt - - --kubelet-client-key=/etc/virtualcluster/pki/virtualCluster.key - - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - - --secure-port={{ .ClusterPort }} - - --service-account-issuer=https://kubernetes.default.svc.cluster.local - - 
--service-account-key-file=/etc/virtualcluster/pki/virtualCluster.key - - --service-account-signing-key-file=/etc/virtualcluster/pki/virtualCluster.key - - --service-cluster-ip-range={{ .ServiceSubnet }} - - --proxy-client-cert-file=/etc/virtualcluster/pki/front-proxy-client.crt - - --proxy-client-key-file=/etc/virtualcluster/pki/front-proxy-client.key - - --requestheader-allowed-names=front-proxy-client - - --requestheader-client-ca-file=/etc/virtualcluster/pki/front-proxy-ca.crt - - --requestheader-extra-headers-prefix=X-Remote-Extra- - - --requestheader-group-headers=X-Remote-Group - - --requestheader-username-headers=X-Remote-User - - --tls-cert-file=/etc/virtualcluster/pki/apiserver.crt - - --tls-private-key-file=/etc/virtualcluster/pki/apiserver.key - - --tls-min-version=VersionTLS13 - - --max-requests-inflight=1500 - - --max-mutating-requests-inflight=500 - - --v=4 - {{ if .UseAPIServerNodePort }} - - --advertise-address=$(HOSTIP) - {{ else }} - - --advertise-address=$(PODIP) - {{ end }} - - --egress-selector-config-file=/etc/kubernetes/konnectivity-server-config/{{ .Namespace }}/{{ .Name }}/egress_selector_configuration.yaml - {{ if not .AdmissionPlugins }} - - --disable-admission-plugins=License - {{ end }} - livenessProbe: - failureThreshold: 8 - httpGet: - path: /livez - port: {{ .ClusterPort }} - scheme: HTTPS - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 15 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /readyz - port: {{ .ClusterPort }} - scheme: HTTPS - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 15 - ports: - - containerPort: {{ .ClusterPort }} - name: http - protocol: TCP - volumeMounts: - - mountPath: /etc/virtualcluster/pki - name: apiserver-cert - readOnly: true - - mountPath: /etc/etcd/pki - name: etcd-cert - readOnly: true - - mountPath: /etc/kubernetes/konnectivity-server/{{ .Namespace }}/{{ .Name }} - readOnly: false - name: konnectivity-uds - - name: kas-proxy - mountPath: /etc/kubernetes/konnectivity-server-config/{{ .Namespace }}/{{ .Name }}/egress_selector_configuration.yaml - subPath: egress_selector_configuration.yaml - - name: konnectivity-server-container - image: {{ .ImageRepository }}/kas-network-proxy-server:{{ .Version }} - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 500m - memory: 256Mi - securityContext: - allowPrivilegeEscalation: false - runAsUser: 0 - command: [ "/proxy-server"] - args: [ - "--log-file=/var/log/{{ .Namespace }}/{{ .Name }}/konnectivity-server.log", - "--logtostderr=true", - "--log-file-max-size=0", - "--cluster-cert=/etc/virtualcluster/pki/apiserver.crt", - "--cluster-key=/etc/virtualcluster/pki/apiserver.key", - {{ if eq .AnpMode "uds" }} - "--server-port=0", - "--mode=grpc", - "--uds-name=/etc/kubernetes/konnectivity-server/{{ .Namespace }}/{{ .Name }}/konnectivity-server.socket", - "--delete-existing-uds-file", - {{ else }} - "--server-port={{ .ServerPort }}", - "--mode=http-connect", - "--server-cert=/etc/virtualcluster/pki/proxy-server.crt", - "--server-ca-cert=/etc/virtualcluster/pki/ca.crt", - "--server-key=/etc/virtualcluster/pki/proxy-server.key", - {{ end }} - "--agent-port={{ .AgentPort }}", - "--health-port={{ .HealthPort }}", - "--admin-port={{ .AdminPort }}", - "--keepalive-time=1h", - "--agent-namespace=kube-system", - "--agent-service-account=konnectivity-agent", - "--kubeconfig=/etc/apiserver/kubeconfig", - "--authentication-audience=system:konnectivity-server", - ] - livenessProbe: - httpGet: - 
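# Editor's note: the konnectivity-server container above runs in one of two modes,
# chosen by .AnpMode: "uds" serves gRPC on a Unix socket shared with the apiserver
# through a hostPath volume, while any other value serves http-connect over TCP
# with its own proxy-server cert and key. The liveness probe below polls /healthz
# on --health-port over plain HTTP.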
scheme: HTTP - host: 127.0.0.1 - port: {{ .HealthPort }} - path: /healthz - initialDelaySeconds: 10 - timeoutSeconds: 60 - ports: - - name: serverport - containerPort: {{ .ServerPort }} - hostPort: {{ .ServerPort }} - - name: agentport - containerPort: {{ .AgentPort }} - hostPort: {{ .AgentPort }} - - name: healthport - containerPort: {{ .HealthPort }} - hostPort: {{ .HealthPort }} - - name: adminport - containerPort: {{ .AdminPort }} - hostPort: {{ .AdminPort }} - volumeMounts: - - mountPath: /etc/virtualcluster/pki - name: apiserver-cert - readOnly: true - - name: varlogkonnectivityserver - mountPath: /var/log/{{ .Namespace }}/{{ .Name }} - readOnly: false - - name: konnectivity-home - mountPath: /etc/kubernetes/konnectivity-server/{{ .Namespace }}/{{ .Name }} - - mountPath: /etc/apiserver/kubeconfig - name: kubeconfig - subPath: kubeconfig - priorityClassName: system-node-critical - volumes: - - name: kubeconfig - secret: - defaultMode: 420 - secretName: {{ .KubeconfigSecret }} - - name: varlogkonnectivityserver - hostPath: - path: /var/log/{{ .Namespace }}/{{ .Name }} - type: DirectoryOrCreate - - name: konnectivity-home - hostPath: - path: /etc/kubernetes/konnectivity-server/{{ .Namespace }}/{{ .Name }} - type: DirectoryOrCreate - - name: apiserver-cert - secret: - secretName: {{ .VirtualClusterCertsSecret }} - - name: etcd-cert - secret: - secretName: {{ .EtcdCertsSecret }} - - name: konnectivity-uds - hostPath: - path: /etc/kubernetes/konnectivity-server/{{ .Namespace }}/{{ .Name }} - type: DirectoryOrCreate - - name: kas-proxy - configMap: - name: kas-proxy-files -` - ApiserverAnpAgentService = ` -apiVersion: v1 -kind: Service -metadata: - name: {{ .SVCName }} - namespace: {{ .Namespace }} -spec: - ports: - - port: {{ .ServerPort }} - name: serverport - targetPort: {{ .ServerPort }} - nodePort: {{ .ServerPort }} - - port: {{ .AgentPort }} - name: agentport - targetPort: {{ .AgentPort }} - nodePort: {{ .AgentPort }} - - port: {{ .HealthPort }} - name: healthport - targetPort: {{ .HealthPort }} - nodePort: {{ .HealthPort }} - - port: {{ .AdminPort }} - name: adminport - targetPort: {{ .AdminPort }} - nodePort: {{ .AdminPort }} - selector: - virtualCluster-app: apiserver - type: NodePort - ` - AnpAgentManifest = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:konnectivity-server - labels: - kubernetes.io/cluster-service: "true" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: - - apiGroup: rbac.authorization.k8s.io - kind: User - name: system:konnectivity-server ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: konnectivity-agent - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: konnectivity-agent - namespace: kube-system - name: konnectivity-agent -spec: - selector: - matchLabels: - k8s-app: konnectivity-agent - updateStrategy: - type: RollingUpdate - template: - metadata: - labels: - k8s-app: konnectivity-agent - spec: - priorityClassName: system-cluster-critical - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - - operator: "Exists" - effect: "NoExecute" - nodeSelector: - kubernetes.io/os: linux - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: konnectivity-agent-container - image: {{ .ImageRepository }}/kas-network-proxy-agent:{{ .Version }} - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - cpu: 500m - memory: 500Mi - 
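# Editor's note: each konnectivity agent registers itself with
# --agent-identifiers=ipv4=$(HOST_IP) (see args below), so the server can route
# traffic destined for a node through the agent running on that node; the agent
# authenticates with a projected service-account token whose audience is
# system:konnectivity-server.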
command: [ "/proxy-agent"] - args: [ - "--logtostderr=true", - "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", - "--proxy-server-host=konnectivity-server.kube-system.svc.cluster.local", - "--proxy-server-port={{ .AgentPort }}", - "--sync-interval=5s", - "--sync-interval-cap=30s", - "--probe-interval=5s", - "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token", - "--agent-identifiers=ipv4=$(HOST_IP)", - {{ if ne .AnpMode "uds" }} - "--agent-cert=/etc/virtualcluster/pki/apiserver.crt", - "--agent-key=/etc/virtualcluster/pki/apiserver.key", - {{ end }} - ] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - livenessProbe: - httpGet: - scheme: HTTP - port: 8093 - path: /healthz - initialDelaySeconds: 15 - timeoutSeconds: 15 - readinessProbe: - httpGet: - scheme: HTTP - port: 8093 - path: /readyz - initialDelaySeconds: 15 - timeoutSeconds: 15 - volumeMounts: - - name: agent-cert - mountPath: /etc/virtualcluster/pki - - mountPath: /var/run/secrets/tokens - name: konnectivity-agent-token - serviceAccountName: konnectivity-agent - volumes: - - name: agent-cert - secret: - secretName: {{ .AgentCertName }} - - name: konnectivity-agent-token - projected: - sources: - - serviceAccountToken: - path: konnectivity-agent-token - audience: system:konnectivity-server ---- -apiVersion: v1 -kind: Service -metadata: - name: konnectivity-server - namespace: kube-system -spec: - ports: - - port: {{ .AgentPort }} - name: proxy-server - targetPort: {{ .AgentPort }} ---- -apiVersion: v1 -kind: Endpoints -metadata: - name: konnectivity-server - namespace: kube-system -subsets: - - addresses: - {{- range .ProxyServerHost }} - - ip: {{ . }} - {{- end }} - ports: - - port: {{ .AgentPort }} - name: proxy-server -` -) diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go deleted file mode 100644 index e2dd017f2..000000000 --- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go +++ /dev/null @@ -1,49 +0,0 @@ -package apiserver - -const ( - ApiserverService = ` -apiVersion: v1 -kind: Service -metadata: - labels: - virtualCluster-app: apiserver - app.kubernetes.io/managed-by: virtual-cluster-controller - name: {{ .ServiceName }} - namespace: {{ .Namespace }} -spec: - ipFamilies: - {{- range .IPFamilies }} - - {{ . 
}} - {{- end }} - ports: - - name: client - port: {{ .ServicePort }} - protocol: TCP - targetPort: {{ .ServicePort }} - {{ if .UseAPIServerNodePort }} - nodePort: {{ .ServicePort }} - {{ end }} - selector: - virtualCluster-app: apiserver - type: {{ .ServiceType }} -` - ApiserverAnpService = ` -apiVersion: v1 -kind: Service -metadata: - labels: - virtualCluster-app: apiserver - app.kubernetes.io/managed-by: virtual-cluster-controller - name: {{ .ServiceName }} - namespace: {{ .Namespace }} -spec: - ports: - - name: proxy-server - port: {{ .ProxyServerPort }} - protocol: TCP - targetPort: {{ .ProxyServerPort }} - selector: - virtualCluster-app: apiserver - type: ClusterIP -` -) diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service_test.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service_test.go deleted file mode 100644 index 3b0b48969..000000000 --- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package apiserver - -import ( - "fmt" - "testing" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/yaml" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -func ParseServerTemplate(apiServerServiceSubnet string) (*corev1.Service, error) { - ipFamilies := utils.IPFamilyGenerator(apiServerServiceSubnet) - apiserverServiceBytes, err := util.ParseTemplate(ApiserverService, struct { - ServiceName, Namespace, ServiceType string - ServicePort int32 - IPFamilies []corev1.IPFamily - UseAPIServerNodePort bool - }{ - ServiceName: fmt.Sprintf("%s-%s", "test", "apiserver"), - Namespace: "test-namespace", - ServiceType: constants.APIServerServiceType, - ServicePort: 40010, - IPFamilies: ipFamilies, - UseAPIServerNodePort: false, - }) - - if err != nil { - return nil, fmt.Errorf("error when parsing virtualClusterApiserver service template: %s", err) - } - - apiserverService := &corev1.Service{} - if err := yaml.Unmarshal([]byte(apiserverServiceBytes), apiserverService); err != nil { - return nil, fmt.Errorf("error when decoding virtual cluster apiserver service: %s", err) - } - return apiserverService, nil -} - -func CompareIPFamilies(a []corev1.IPFamily, b []corev1.IPFamily) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -func TestSyncIPPool(t *testing.T) { - tests := []struct { - name string - input string - want []corev1.IPFamily - }{ - { - name: "ipv4 only", - input: "10.237.6.0/18", - want: []corev1.IPFamily{corev1.IPv4Protocol}, - }, - { - name: "ipv6 only", - input: "2409:8c2f:3800:0011::0a18:0000/114", - want: []corev1.IPFamily{corev1.IPv6Protocol}, - }, - { - name: "ipv4 first", - input: "10.237.6.0/18,2409:8c2f:3800:0011::0a18:0000/114", - want: []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol}, - }, - { - name: "ipv6 first", - input: "2409:8c2f:3800:0011::0a18:0000/114,10.237.6.0/18", - want: []corev1.IPFamily{corev1.IPv6Protocol, corev1.IPv4Protocol}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, err := ParseServerTemplate(tt.input) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !CompareIPFamilies(svc.Spec.IPFamilies, tt.want) { - t.Errorf("ParseServerTemplate()=%v, want %v", svc.Spec.IPFamilies, tt.want) - } - }) - } -} diff --git a/pkg/kubenest/manifest/controlplane/apiserver/manifeats_configmap.go
b/pkg/kubenest/manifest/controlplane/apiserver/manifeats_configmap.go deleted file mode 100644 index 3096aab02..000000000 --- a/pkg/kubenest/manifest/controlplane/apiserver/manifeats_configmap.go +++ /dev/null @@ -1,37 +0,0 @@ -package apiserver - -const ( - EgressSelectorConfiguration = ` -apiVersion: v1 -data: - egress_selector_configuration.yaml: | - apiVersion: apiserver.k8s.io/v1beta1 - kind: EgressSelectorConfiguration - egressSelections: - - name: cluster - connection: - proxyProtocol: {{ if eq .AnpMode "uds" }}GRPC{{ else }}HTTPConnect{{ end }} - transport: - {{ if eq .AnpMode "uds" }} - uds: - udsName: /etc/kubernetes/konnectivity-server/{{ .Namespace }}/{{ .Name }}/konnectivity-server.socket - {{ else }} - tcp: - url: https://{{ .SvcName }}:{{ .ProxyServerPort }} - tlsConfig: - caBundle: /etc/virtualcluster/pki/ca.crt - clientKey: /etc/virtualcluster/pki/proxy-server.key - clientCert: /etc/virtualcluster/pki/proxy-server.crt - {{ end }} - - name: master - connection: - proxyProtocol: Direct - - name: etcd - connection: - proxyProtocol: Direct -kind: ConfigMap -metadata: - name: kas-proxy-files - namespace: {{ .Namespace }} -` -) diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_configmap.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_configmap.go deleted file mode 100644 index 7156773a6..000000000 --- a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_configmap.go +++ /dev/null @@ -1,34 +0,0 @@ -package host - -const ( - CoreDNSCM = ` -apiVersion: v1 -data: - Corefile: | - .:53 { - errors - health { - lameduck 5s - } - ready - kubernetes cluster.local in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - kubeconfig /etc/apiserver/kubeconfig - } - prometheus :9153 - forward . 
/etc/resolv.conf { - max_concurrent 1000 - } - cache 30 - loop - reload - loadbalance - } -kind: ConfigMap -metadata: - name: coredns - namespace: {{ .Namespace }} -` -) diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_deployment.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_deployment.go deleted file mode 100644 index b0b7229f8..000000000 --- a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_deployment.go +++ /dev/null @@ -1,133 +0,0 @@ -package host - -const ( - CoreDNSDeployment = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - k8s-app: kube-dns - name: {{ .Name }}-coredns - namespace: {{ .Namespace }} -spec: - progressDeadlineSeconds: 600 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kube-dns - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - creationTimestamp: null - labels: - k8s-app: kube-dns - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: k8s-app - operator: In - values: - - kube-dns - topologyKey: kubernetes.io/hostname - weight: 100 - containers: - - args: - - -conf - - /etc/coredns/Corefile - image: {{ .ImageRepository }}/coredns:{{ .CoreDNSImageTag }} - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: coredns - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - - containerPort: 9153 - name: metrics - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /ready - port: 8181 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - all - readOnlyRootFilesystem: true - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /etc/coredns - name: config-volume - readOnly: true - - mountPath: /etc/apiserver/kubeconfig - name: kubeconfig - subPath: kubeconfig - dnsPolicy: Default - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-cluster-critical - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: coredns - serviceAccountName: coredns - terminationGracePeriodSeconds: 30 - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - volumes: - - configMap: - defaultMode: 420 - items: - - key: Corefile - path: Corefile - name: coredns - name: config-volume - - name: kubeconfig - secret: - defaultMode: 420 - secretName: {{ .Name }}-admin-config - - -` -) diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_service.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_service.go deleted file mode 100644 index 404f4010f..000000000 --- a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_service.go +++ /dev/null @@ -1,34 +0,0 @@ -package host - -const ( - CoreDNSService = ` -apiVersion: v1 -kind: Service -metadata: - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: 
CoreDNS - name: kube-dns - namespace: {{ .Namespace }} -spec: - ports: - - name: dns - port: 53 - protocol: UDP - targetPort: 53 - - name: dns-tcp - port: 53 - protocol: TCP - targetPort: 53 - - name: metrics - port: 9153 - protocol: TCP - targetPort: 9153 - selector: - k8s-app: kube-dns - sessionAffinity: None - type: NodePort - -` -) diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifests_rbac.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifests_rbac.go deleted file mode 100644 index f04ed7da7..000000000 --- a/pkg/kubenest/manifest/controlplane/coredns/host/manifests_rbac.go +++ /dev/null @@ -1,57 +0,0 @@ -package host - -const ( - CoreDNSSA = ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: coredns - namespace: {{ .Namespace }} -` - - CoreDNSClusterRoleBinding = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:coredns-{{ .Name }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:coredns-{{ .Name }} -subjects: -- kind: ServiceAccount - name: coredns - namespace: {{ .Namespace }} -` - - CoreDNSClusterRole = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:coredns-{{ .Name }} -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch -` -) diff --git a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_endpoints.go b/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_endpoints.go deleted file mode 100644 index d11b89c5d..000000000 --- a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_endpoints.go +++ /dev/null @@ -1,21 +0,0 @@ -package virtualcluster - -const ( - CoreDNSEndpoints = ` -apiVersion: v1 -kind: Endpoints -metadata: - name: kube-dns - namespace: kube-system -subsets: -- addresses: - - ip: {{ .HostNodeAddress }} - ports: - - name: dns - port: {{ .DNSPort }} - protocol: UDP - - name: metrics - port: {{ .MetricsPort }} - protocol: TCP -` -) diff --git a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_service.go b/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_service.go deleted file mode 100644 index 3f5df06d4..000000000 --- a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_service.go +++ /dev/null @@ -1,30 +0,0 @@ -package virtualcluster - -const ( - CoreDNSService = ` -apiVersion: v1 -kind: Service -metadata: - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: CoreDNS - name: kube-dns - namespace: kube-system -spec: - ports: - - name: dns - port: 53 - protocol: UDP - targetPort: {{ .DNSPort }} - - name: dns-tcp - port: 53 - protocol: TCP - targetPort: {{ .DNSTCPPort }} - - name: metrics - port: 9153 - protocol: TCP - targetPort: {{ .MetricsPort }} - -` -) diff --git a/pkg/kubenest/manifest/controlplane/etcd/mainfests_deployment.go b/pkg/kubenest/manifest/controlplane/etcd/mainfests_deployment.go deleted file mode 100644 index dfa407bb0..000000000 --- a/pkg/kubenest/manifest/controlplane/etcd/mainfests_deployment.go +++ /dev/null @@ -1,127 +0,0 @@ -package etcd - -const ( - // EtcdStatefulSet is etcd StatefulSet manifest - EtcdStatefulSet = ` -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - virtualCluster-app: etcd - app.kubernetes.io/managed-by: 
virtual-cluster-controller - namespace: {{ .Namespace }} - name: {{ .StatefulSetName }} -spec: - replicas: {{ .Replicas }} - serviceName: {{ .StatefulSetName }} - podManagementPolicy: Parallel - selector: - matchLabels: - virtualCluster-app: etcd - template: - metadata: - labels: - virtualCluster-app: etcd - spec: - automountServiceAccountToken: false - tolerations: - - key: {{ .VirtualControllerLabel }} - operator: "Exists" - effect: "NoSchedule" - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .VirtualControllerLabel }} - operator: Exists - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: virtualCluster-app - operator: In - values: - - etcd - topologyKey: kubernetes.io/hostname - containers: - - name: etcd - image: {{ .ImageRepository }}/etcd:{{ .Version }} - imagePullPolicy: IfNotPresent - command: - - /usr/local/bin/etcd - - --name=$(VIRTUAL_ETCD_NAME) - - --listen-client-urls=https://[::]:{{ .EtcdListenClientPort }} - - --listen-peer-urls=http://[::]:{{ .EtcdListenPeerPort }} - - --advertise-client-urls=https://{{ .EtcdClientService }}.{{ .Namespace }}.svc.cluster.local:{{ .EtcdListenClientPort }} - - --initial-cluster={{ .InitialCluster }} - - --initial-advertise-peer-urls=http://$(VIRTUAL_ETCD_NAME).{{ .EtcdPeerServiceName }}.{{ .Namespace }}.svc.cluster.local:2380 - - --initial-cluster-state=new - - --client-cert-auth=true - - --trusted-ca-file=/etc/virtualcluster/pki/etcd/etcd-ca.crt - - --cert-file=/etc/virtualcluster/pki/etcd/etcd-server.crt - - --key-file=/etc/virtualcluster/pki/etcd/etcd-server.key - - --data-dir=/var/lib/etcd - - --snapshot-count=10000 - - --log-level=debug - - --cipher-suites={{ .EtcdCipherSuites }} - #- --peer-cert-file=/etc/virtualcluster/pki/etcd/etcd-server.crt - #- --peer-client-cert-auth=true - #- --peer-key-file=/etc/virtualcluster/pki/etcd/etcd-server.key - #- --peer-trusted-ca-file=/etc/virtualcluster/pki/etcd/etcd-ca.crt - env: - - name: PODIP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - - name: VIRTUAL_ETCD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - livenessProbe: - exec: - command: - - /bin/sh - - -ec - {{ if .IPV6First }} - - etcdctl get /registry --prefix --keys-only --endpoints https://[::1]:{{ .EtcdListenClientPort }} --cacert=/etc/virtualcluster/pki/etcd/etcd-ca.crt --cert=/etc/virtualcluster/pki/etcd/etcd-server.crt --key=/etc/virtualcluster/pki/etcd/etcd-server.key - {{ else }} - - etcdctl get /registry --prefix --keys-only --endpoints https://127.0.0.1:{{ .EtcdListenClientPort }} --cacert=/etc/virtualcluster/pki/etcd/etcd-ca.crt --cert=/etc/virtualcluster/pki/etcd/etcd-server.crt --key=/etc/virtualcluster/pki/etcd/etcd-server.key - {{ end }} - failureThreshold: 3 - initialDelaySeconds: 600 - periodSeconds: 60 - successThreshold: 1 - timeoutSeconds: 10 - ports: - - containerPort: {{ .EtcdListenClientPort }} - name: client - protocol: TCP - - containerPort: {{ .EtcdListenPeerPort }} - name: server - protocol: TCP - volumeMounts: - - mountPath: /var/lib/etcd - name: {{ .EtcdDataVolumeName }} - - mountPath: /etc/virtualcluster/pki/etcd - name: etcd-cert - volumes: - - name: etcd-cert - secret: - secretName: {{ .CertsSecretName }} - volumeClaimTemplates: - - metadata: - name: etcd-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .ETCDStorageSize }} - storageClassName:
{{ .ETCDStorageClass }} -` -) diff --git a/pkg/kubenest/manifest/controlplane/etcd/mainfests_service.go b/pkg/kubenest/manifest/controlplane/etcd/mainfests_service.go deleted file mode 100644 index 5b568a427..000000000 --- a/pkg/kubenest/manifest/controlplane/etcd/mainfests_service.go +++ /dev/null @@ -1,50 +0,0 @@ -package etcd - -const ( - // EtcdClientService is etcd client service manifest - EtcdClientService = ` -apiVersion: v1 -kind: Service -metadata: - labels: - virtualCluster-app: etcd - app.kubernetes.io/managed-by: virtual-cluster-controller - name: {{ .ServiceName }} - namespace: {{ .Namespace }} -spec: - ports: - - name: client - port: {{ .EtcdListenClientPort }} - protocol: TCP - targetPort: {{ .EtcdListenClientPort }} - selector: - virtualCluster-app: etcd - type: ClusterIP - ` - - // EtcdPeerService is etcd peer Service manifest - EtcdPeerService = ` - apiVersion: v1 - kind: Service - metadata: - labels: - virtualCluster-app: etcd - app.kubernetes.io/managed-by: virtual-cluster-controller - name: {{ .ServiceName }} - namespace: {{ .Namespace }} - spec: - clusterIP: None - ports: - - name: client - port: {{ .EtcdListenClientPort }} - protocol: TCP - targetPort: {{ .EtcdListenClientPort }} - - name: server - port: {{ .EtcdListenPeerPort }} - protocol: TCP - targetPort: {{ .EtcdListenPeerPort }} - selector: - virtualCluster-app: etcd - type: ClusterIP - ` -) diff --git a/pkg/kubenest/manifest/controlplane/kubecontroller/manifests_deployment.go b/pkg/kubenest/manifest/controlplane/kubecontroller/manifests_deployment.go deleted file mode 100644 index f1d6447b6..000000000 --- a/pkg/kubenest/manifest/controlplane/kubecontroller/manifests_deployment.go +++ /dev/null @@ -1,99 +0,0 @@ -package kubecontroller - -const ( - // KubeControllerManagerDeployment is KubeControllerManage deployment manifest - KubeControllerManagerDeployment = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .DeploymentName }} - namespace: {{ .Namespace }} - labels: - virtualCluster-app: kube-controller-manager - app.kubernetes.io/managed-by: virtual-cluster-controller -spec: - replicas: {{ .Replicas }} - selector: - matchLabels: - virtualCluster-app: kube-controller-manager - template: - metadata: - labels: - virtualCluster-app: kube-controller-manager - spec: - automountServiceAccountToken: false - priorityClassName: system-node-critical - tolerations: - - key: {{ .VirtualControllerLabel }} - operator: "Exists" - effect: "NoSchedule" - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .VirtualControllerLabel }} - operator: Exists - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: virtualCluster-app - operator: In - values: - - kube-controller-manager - topologyKey: kubernetes.io/hostname - containers: - - name: kube-controller-manager - image: {{ .ImageRepository }}/kube-controller-manager:{{ .Version }} - imagePullPolicy: IfNotPresent - command: - - kube-controller-manager - - --allocate-node-cidrs=true - - --kubeconfig=/etc/virtualcluster/kubeconfig - - --authentication-kubeconfig=/etc/virtualcluster/kubeconfig - - --authorization-kubeconfig=/etc/virtualcluster/kubeconfig - - '--bind-address=::' - - --client-ca-file=/etc/virtualcluster/pki/ca.crt - - --cluster-cidr={{ .PodSubnet }} - - --cluster-name=virtualcluster - - --cluster-signing-cert-file=/etc/virtualcluster/pki/ca.crt - - 
--cluster-signing-key-file=/etc/virtualcluster/pki/ca.key - - --controllers=*,namespace,garbagecollector,serviceaccount-token,ttl-after-finished,bootstrapsigner,csrapproving,csrcleaner,csrsigning,clusterrole-aggregation - - --leader-elect=true - {{ if not .IPV6First }} - - --node-cidr-mask-size=24 - {{ end }} - - --root-ca-file=/etc/virtualcluster/pki/ca.crt - - --service-account-private-key-file=/etc/virtualcluster/pki/virtualCluster.key - - --service-cluster-ip-range={{ .ServiceSubnet }} - - --use-service-account-credentials=true - - --v=4 - livenessProbe: - failureThreshold: 8 - httpGet: - path: /healthz - port: 10257 - scheme: HTTPS - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 15 - volumeMounts: - - name: virtualcluster-certs - mountPath: /etc/virtualcluster/pki - readOnly: true - - name: kubeconfig - mountPath: /etc/virtualcluster/kubeconfig - subPath: kubeconfig - volumes: - - name: virtualcluster-certs - secret: - secretName: {{ .VirtualClusterCertsSecret }} - - name: kubeconfig - secret: - secretName: {{ .KubeconfigSecret }} -` -) diff --git a/pkg/kubenest/manifest/controlplane/proxy/mainfests_daemonset.go b/pkg/kubenest/manifest/controlplane/proxy/mainfests_daemonset.go deleted file mode 100644 index d2652429e..000000000 --- a/pkg/kubenest/manifest/controlplane/proxy/mainfests_daemonset.go +++ /dev/null @@ -1,147 +0,0 @@ -package proxy - -const ( - ProxyDaemonSet = ` -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ .DaemonSetName }} - namespace: {{ .Namespace }} - labels: - virtualCluster-app: kube-proxy - app.kubernetes.io/managed-by: virtual-cluster-controller -spec: - revisionHistoryLimit: 10 - selector: - matchLabels: - app.kubernetes.io/managed-by: virtual-cluster-controller - template: - metadata: - labels: - app.kubernetes.io/managed-by: virtual-cluster-controller - spec: - containers: - - command: - - /usr/local/bin/kube-proxy - - --config=/var/lib/kube-proxy/config.conf - - --hostname-override=$(NODE_NAME) - env: - - name: NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: {{ .ImageRepository }}/kube-proxy:{{ .Version }} - imagePullPolicy: IfNotPresent - name: kube-proxy - resources: {} - securityContext: - privileged: true - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/lib/kube-proxy - name: kube-proxy - - mountPath: /run/xtables.lock - name: xtables-lock - - mountPath: /lib/modules - name: lib-modules - readOnly: true - dnsPolicy: ClusterFirst - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: kube-proxy - serviceAccountName: kube-proxy - terminationGracePeriodSeconds: 30 - tolerations: - - operator: Exists - volumes: - - configMap: - defaultMode: 420 - name: kube-proxy - name: kube-proxy - - hostPath: - path: /run/xtables.lock - type: FileOrCreate - name: xtables-lock - - hostPath: - path: /lib/modules - type: "" - name: lib-modules - updateStrategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - type: RollingUpdate -` - ProxyConfigMap = ` -apiVersion: v1 -data: - config.conf: |- - apiVersion: kubeproxy.config.k8s.io/v1alpha1 - bindAddress: 0.0.0.0 - bindAddressHardFail: false - clientConnection: - acceptContentTypes: "" - burst: 100 - contentType: "" - kubeconfig: /var/lib/kube-proxy/kubeconfig.conf - qps: 100 - clusterCIDR: {{ .ClusterCIDR }} - 
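# Editor's note: this KubeProxyConfiguration runs kube-proxy in ipvs mode (see
# mode: ipvs below) with iptables.masqueradeAll enabled; clusterCIDR is filled in
# from the ClusterCIDR template variable, and ipvs.excludeCIDRs keeps 192.0.0.1/32
# out of the generated IPVS rules.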
configSyncPeriod: 0s - conntrack: - maxPerCore: null - min: null - tcpCloseWaitTimeout: null - tcpEstablishedTimeout: null - detectLocal: - bridgeInterface: "" - interfaceNamePrefix: "" - detectLocalMode: "" - enableProfiling: false - healthzBindAddress: "" - hostnameOverride: "" - iptables: - masqueradeAll: true - masqueradeBit: null - minSyncPeriod: 0s - syncPeriod: 0s - ipvs: - excludeCIDRs: - - 192.0.0.1/32 - minSyncPeriod: 0s - scheduler: "" - strictARP: false - syncPeriod: 0s - tcpFinTimeout: 0s - tcpTimeout: 0s - udpTimeout: 0s - kind: KubeProxyConfiguration - metricsBindAddress: 0.0.0.0:10249 - mode: ipvs - nodePortAddresses: null - oomScoreAdj: null - portRange: "" - showHiddenMetricsForVersion: "" - udpIdleTimeout: 0s - winkernel: - enableDSR: false - forwardHealthCheckVip: false - networkName: "" - rootHnsEndpointName: "" - sourceVip: "" - kubeconfig.conf: |- - {{ .KubeProxyKubeConfig }} -kind: ConfigMap -metadata: - labels: - app: kube-proxy - name: {{ .ConfigMapName }} - namespace: {{ .Namespace }} -` -) diff --git a/pkg/kubenest/manifest/controlplane/proxy/manifests_rbac.go b/pkg/kubenest/manifest/controlplane/proxy/manifests_rbac.go deleted file mode 100644 index 5d154f291..000000000 --- a/pkg/kubenest/manifest/controlplane/proxy/manifests_rbac.go +++ /dev/null @@ -1,11 +0,0 @@ -package proxy - -const ( - ProxySA = ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .SAName }} - namespace: {{ .Namespace }} -` -) diff --git a/pkg/kubenest/manifest/controlplane/scheduler/manifest_configmap.go b/pkg/kubenest/manifest/controlplane/scheduler/manifest_configmap.go deleted file mode 100644 index 6cd4f6eb0..000000000 --- a/pkg/kubenest/manifest/controlplane/scheduler/manifest_configmap.go +++ /dev/null @@ -1,53 +0,0 @@ -package scheduler - -const ( - VirtualClusterSchedulerConfigMap = ` -apiVersion: v1 -kind: ConfigMap -metadata: - name: scheduler-config - namespace: {{ .Namespace }} -data: - scheduler-config.yaml: | - apiVersion: kubescheduler.config.k8s.io/v1 - kind: KubeSchedulerConfiguration - leaderElection: - leaderElect: true - resourceName: {{ .DeploymentName }} - resourceNamespace: kube-system - clientConnection: - kubeconfig: /etc/virtualcluster/kubeconfig - profiles: - - schedulerName: default-scheduler - plugins: - preFilter: - disabled: - - name: "VolumeBinding" - enabled: - - name: "LeafNodeVolumeBinding" - filter: - disabled: - - name: "VolumeBinding" - - name: "TaintToleration" - enabled: - - name: "LeafNodeTaintToleration" - - name: "LeafNodeVolumeBinding" - score: - disabled: - - name: "VolumeBinding" - reserve: - disabled: - - name: "VolumeBinding" - enabled: - - name: "LeafNodeVolumeBinding" - preBind: - disabled: - - name: "VolumeBinding" - enabled: - - name: "LeafNodeVolumeBinding" - pluginConfig: - - name: LeafNodeVolumeBinding - args: - bindTimeoutSeconds: 5 -` -) diff --git a/pkg/kubenest/manifest/controlplane/scheduler/manifest_deployment.go b/pkg/kubenest/manifest/controlplane/scheduler/manifest_deployment.go deleted file mode 100644 index 09b01703f..000000000 --- a/pkg/kubenest/manifest/controlplane/scheduler/manifest_deployment.go +++ /dev/null @@ -1,81 +0,0 @@ -package scheduler - -const ( - VirtualClusterSchedulerDeployment = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .DeploymentName }} - namespace: {{ .Namespace }} - labels: - virtualCluster-app: scheduler - app.kubernetes.io/managed-by: virtual-cluster-controller -spec: - replicas: {{ .Replicas }} - selector: - matchLabels: - virtualCluster-app: scheduler - template: - 
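# Editor's note: the scheduler-config ConfigMap above disables the in-tree
# VolumeBinding and TaintToleration plugins and enables kosmos'
# LeafNodeVolumeBinding and LeafNodeTaintToleration instead, so decisions about
# leaf-cluster nodes use kosmos' own volume and taint logic. The pod template
# below mounts that ConfigMap at /etc/kubernetes/kube-scheduler and points the
# scheduler at it via --config.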
metadata: - labels: - virtualCluster-app: scheduler - spec: - automountServiceAccountToken: false - tolerations: - - key: {{ .VirtualControllerLabel }} - operator: "Exists" - effect: "NoSchedule" - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .VirtualControllerLabel }} - operator: Exists - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: virtualCluster-app - operator: In - values: - - scheduler - topologyKey: kubernetes.io/hostname - containers: - - name: scheduler - image: {{ .ImageRepository }}/scheduler:{{ .Version }} - imagePullPolicy: IfNotPresent - command: - - scheduler - - --config=/etc/kubernetes/kube-scheduler/scheduler-config.yaml - - --authentication-kubeconfig=/etc/virtualcluster/kubeconfig - - --authorization-kubeconfig=/etc/virtualcluster/kubeconfig - - --v=4 - livenessProbe: - httpGet: - path: /healthz - port: 10259 - scheme: HTTPS - failureThreshold: 3 - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - volumeMounts: - - name: kubeconfig - subPath: kubeconfig - mountPath: /etc/virtualcluster/kubeconfig - - name: scheduler-config - readOnly: true - mountPath: /etc/kubernetes/kube-scheduler - volumes: - - name: kubeconfig - secret: - secretName: {{ .KubeconfigSecret }} - - name: scheduler-config - configMap: - defaultMode: 420 - name: scheduler-config -` -) diff --git a/pkg/kubenest/manifest/controlplane/scheduler/manifests_rbac.go b/pkg/kubenest/manifest/controlplane/scheduler/manifests_rbac.go deleted file mode 100644 index 2680eba2b..000000000 --- a/pkg/kubenest/manifest/controlplane/scheduler/manifests_rbac.go +++ /dev/null @@ -1,173 +0,0 @@ -package scheduler - -const ( - VirtualSchedulerSA = ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: virtualcluster-scheduler - namespace: {{ .Namespace }} -` - VirtualSchedulerRoleBinding = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: virtual-scheduler -subjects: - - kind: ServiceAccount - name: virtualcluster-scheduler - namespace: {{ .Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: virtual-scheduler -` - VirtualSchedulerRole = ` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: virtual-scheduler -rules: - - verbs: - - create - - patch - - update - apiGroups: - - '' - - events.k8s.io - resources: - - events - - verbs: - - create - apiGroups: - - coordination.k8s.io - resources: - - leases - - verbs: - - get - - update - apiGroups: - - coordination.k8s.io - resources: - - leases - resourceNames: - - virtualcluster-scheduler - - verbs: - - create - apiGroups: - - '' - resources: - - endpoints - - verbs: - - get - - update - apiGroups: - - '' - resources: - - endpoints - - verbs: - - get - - list - - watch - apiGroups: - - '' - resources: - - nodes - - verbs: - - delete - - get - - list - - watch - apiGroups: - - '' - resources: - - pods - - verbs: - - create - apiGroups: - - '' - resources: - - bindings - - pods/binding - - verbs: - - patch - - update - apiGroups: - - '' - resources: - - pods/status - - verbs: - - get - - list - - watch - apiGroups: - - '' - resources: - - replicationcontrollers - - services - - verbs: - - get - - list - - watch - apiGroups: - - apps - - extensions - resources: - - replicasets - - verbs: - - get - - list - - watch - apiGroups: - - apps - resources: - - statefulsets - - verbs: - 
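# Editor's note: beyond the stock kube-scheduler permissions, this role also
# grants update on persistentvolumeclaims/persistentvolumes and read access to
# all storage.k8s.io resources (see the rules below), presumably what the
# LeafNodeVolumeBinding plugin needs in order to bind volumes itself.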
- get - - list - - watch - apiGroups: - - policy - resources: - - poddisruptionbudgets - - verbs: - - get - - list - - watch - - update - apiGroups: - - '' - resources: - - persistentvolumeclaims - - persistentvolumes - - verbs: - - create - apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - - verbs: - - create - apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - - verbs: - - get - - list - - watch - apiGroups: - - storage.k8s.io - resources: - - '*' - - verbs: - - get - - list - - watch - apiGroups: - - '' - resources: - - configmaps - - namespaces -` -) diff --git a/pkg/kubenest/manifest/controlplane/virtualcluster/manifests_service.go b/pkg/kubenest/manifest/controlplane/virtualcluster/manifests_service.go deleted file mode 100644 index a01c399a2..000000000 --- a/pkg/kubenest/manifest/controlplane/virtualcluster/manifests_service.go +++ /dev/null @@ -1,25 +0,0 @@ -package virtualcluster - -const ( - APIServerExternalService = ` -apiVersion: v1 -kind: Service -metadata: - name: api-server-external-service - namespace: kosmos-system -spec: - ipFamilies: - {{- range .IPFamilies }} - - {{ . }} - {{- end }} - ipFamilyPolicy: PreferDualStack - type: NodePort - ports: - - name: https - protocol: TCP - port: {{ .ServicePort }} - targetPort: {{ .ServicePort }} - nodePort: 30443 - sessionAffinity: None -` -) diff --git a/pkg/kubenest/manifest/kosmos/manifest_deployment.go b/pkg/kubenest/manifest/kosmos/manifest_deployment.go deleted file mode 100644 index b2a83eeec..000000000 --- a/pkg/kubenest/manifest/kosmos/manifest_deployment.go +++ /dev/null @@ -1,58 +0,0 @@ -package kosmos - -const ( - ClusterTreeClusterManagerDeployment = `--- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Name }}-clustertree-cluster-manager - namespace: {{ .Namespace }} - labels: - app: clustertree-cluster-manager -spec: - replicas: 1 - selector: - matchLabels: - app: clustertree-cluster-manager - template: - metadata: - labels: - app: clustertree-cluster-manager - spec: - containers: - - name: manager - image: {{ .ImageRepository }}/clustertree-cluster-manager:{{ .Version }} - imagePullPolicy: IfNotPresent - env: - - name: APISERVER_CERT_LOCATION - value: {{ .FilePath }}/cert.pem - - name: APISERVER_KEY_LOCATION - value: {{ .FilePath }}/key.pem - - name: LEAF_NODE_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - volumeMounts: - - name: credentials - mountPath: "{{ .FilePath }}" - readOnly: true - command: - - clustertree-cluster-manager - - --multi-cluster-service=true - - --v=4 - - --leader-elect-resource-namespace=kube-system - - --kubeconfig={{ .FilePath }}/kubeconfig - volumes: - - name: credentials - secret: - secretName: {{ .Name }}-clustertree-cluster-manager -` -) - -type DeploymentReplace struct { - Namespace string - ImageRepository string - Version string - FilePath string - Name string -} diff --git a/pkg/kubenest/manifest/kosmos/manifest_secret.go b/pkg/kubenest/manifest/kosmos/manifest_secret.go deleted file mode 100644 index 4be4420f7..000000000 --- a/pkg/kubenest/manifest/kosmos/manifest_secret.go +++ /dev/null @@ -1,24 +0,0 @@ -package kosmos - -const ( - ClusterTreeClusterManagerSecret = `--- -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Name }}-clustertree-cluster-manager - namespace: {{ .Namespace }} -type: Opaque -data: - cert.pem: {{ .Cert }} - key.pem: {{ .Key }} - kubeconfig: {{ .Kubeconfig }} -` -) - -type SecretReplace struct { - Namespace string - Cert string - Key string - Kubeconfig string - Name string -} diff --git 
a/pkg/kubenest/tasks/anp.go b/pkg/kubenest/tasks/anp.go deleted file mode 100644 index d26ed071c..000000000 --- a/pkg/kubenest/tasks/anp.go +++ /dev/null @@ -1,448 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - "strings" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/apiserver" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -func NewAnpTask() workflow.Task { - return workflow.Task{ - Name: "anp", - Run: runAnp, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "Upload-ProxyAgentCert", - Run: runUploadProxyAgentCert, - }, - { - Name: "deploy-anp-agent", - Run: runAnpAgent, - }, - { - Name: "deploy-anp-server", - Run: runAnpServer, - }, - { - Name: "check-anp-health", - Run: runCheckVirtualClusterAnp, - }, - }, - } -} - -func runAnp(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("anp task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[anp] Running anp task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runAnpServer(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster anp task invoked with an invalid data struct") - } - name, namespace := data.GetName(), data.GetNamespace() - kubeNestOpt := data.KubeNestOpt() - portMap := data.HostPortMap() - // install egress_selector_configuration config map - egressSelectorConfig, err := util.ParseTemplate(apiserver.EgressSelectorConfiguration, struct { - Namespace string - Name string - AnpMode string - ProxyServerPort int32 - SvcName string - }{ - Namespace: namespace, - Name: name, - ProxyServerPort: portMap[constants.APIServerNetworkProxyServerPortKey], - SvcName: fmt.Sprintf("%s-konnectivity-server.%s.svc.cluster.local", name, namespace), - AnpMode: kubeNestOpt.KubeInKubeConfig.AnpMode, - }) - if err != nil { - return fmt.Errorf("failed to parse egress_selector_configuration config map template, err: %w", err) - } - cm := &v1.ConfigMap{} - err = yaml.Unmarshal([]byte(egressSelectorConfig), cm) - if err != nil { - return fmt.Errorf("failed to unmarshal egress_selector_configuration config map, err: %w", err) - } - // create configMap - err = util.CreateOrUpdateConfigMap(data.RemoteClient(), cm) - if err != nil { - return fmt.Errorf("failed to create egress_selector_configuration config map, err: %w", err) - } - err = installAnpServer(data.RemoteClient(), name, namespace, portMap, kubeNestOpt, data.VirtualCluster()) - if err != nil { - return fmt.Errorf("failed to install virtual cluster anp component, err: %w", err) - } - - klog.V(2).InfoS("[VirtualClusterAnp] Successfully installed virtual cluster anp component", "virtual cluster", klog.KObj(data)) - return nil -} - -func runAnpAgent(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("deploy-anp-agent task invoked with an invalid data struct") - } - return
installAnpAgent(data) -} - -func UninstallAnpTask() workflow.Task { - return workflow.Task{ - Name: "anp", - Run: runAnp, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "anp", - Run: uninstallAnp, - }, - }, - } -} - -func uninstallAnp(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster anp task invoked with an invalid data struct") - } - client := data.RemoteClient() - namespace := data.GetNamespace() - name := "kas-proxy-files" - err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "Failed to delete configmap %s/%s", namespace, name) - } - } - return nil -} -func installAnpServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - imageRepository, imageVersion := util.GetImageMessage() - clusterIP, err := util.GetEtcdServiceClusterIP(namespace, name+constants.EtcdSuffix, client) - if err != nil { - // the etcd client service IP is required below, so propagate the error instead of swallowing it - return err - } - - IPV6FirstFlag, err := util.IPV6First(constants.APIServerServiceSubnet) - if err != nil { - return err - } - - vclabel := util.GetVirtualControllerLabel() - - apiserverDeploymentBytes, err := util.ParseTemplate(apiserver.ApiserverAnpDeployment, struct { - DeploymentName, Namespace, ImageRepository, EtcdClientService, Version, VirtualControllerLabel string - ServiceSubnet, VirtualClusterCertsSecret, EtcdCertsSecret string - Replicas int - EtcdListenClientPort int32 - ClusterPort int32 - AgentPort int32 - ServerPort int32 - HealthPort int32 - AdminPort int32 - KubeconfigSecret string - Name string - AnpMode string - AdmissionPlugins bool - IPV6First bool - UseAPIServerNodePort bool - }{ - DeploymentName: util.GetAPIServerName(name), - Namespace: namespace, - ImageRepository: imageRepository, - Version: imageVersion, - VirtualControllerLabel: vclabel, - EtcdClientService: clusterIP, - ServiceSubnet: constants.APIServerServiceSubnet, - VirtualClusterCertsSecret: util.GetCertName(name), - EtcdCertsSecret: util.GetEtcdCertName(name), - Replicas: kubeNestConfiguration.KubeInKubeConfig.APIServerReplicas, - EtcdListenClientPort: constants.APIServerEtcdListenClientPort, - ClusterPort: portMap[constants.APIServerPortKey], - AgentPort: portMap[constants.APIServerNetworkProxyAgentPortKey], - ServerPort: portMap[constants.APIServerNetworkProxyServerPortKey], - HealthPort: portMap[constants.APIServerNetworkProxyHealthPortKey], - AdminPort: portMap[constants.APIServerNetworkProxyAdminPortKey], - KubeconfigSecret: util.GetAdminConfigClusterIPSecretName(name), - Name: name, - AnpMode: kubeNestConfiguration.KubeInKubeConfig.AnpMode, - AdmissionPlugins: kubeNestConfiguration.KubeInKubeConfig.AdmissionPlugins, - IPV6First: IPV6FirstFlag, - UseAPIServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort, - }) - if err != nil { - return fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err) - } - klog.V(4).InfoS("[anp] apply anp server", "anp server deploy", apiserverDeploymentBytes) - - apiserverDeployment := &appsv1.Deployment{} - if err := yaml.Unmarshal([]byte(apiserverDeploymentBytes), apiserverDeployment); err != nil { - return fmt.Errorf("error when decoding virtual cluster apiserver deployment: %w", err) - } - - if err := util.CreateOrUpdateDeployment(client, apiserverDeployment); err != nil { - return
fmt.Errorf("error when creating deployment for %s, err: %w", apiserverDeployment.Name, err) - } - - if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort { - apiserverServiceBytes, err := util.ParseTemplate(apiserver.ApiserverAnpAgentService, struct { - SVCName, Namespace string - ClusterPort int32 - AgentPort int32 - ServerPort int32 - HealthPort int32 - AdminPort int32 - }{ - SVCName: util.GetKonnectivityAPIServerName(name), - Namespace: namespace, - ClusterPort: portMap[constants.APIServerPortKey], - AgentPort: portMap[constants.APIServerNetworkProxyAgentPortKey], - ServerPort: portMap[constants.APIServerNetworkProxyServerPortKey], - HealthPort: portMap[constants.APIServerNetworkProxyHealthPortKey], - AdminPort: portMap[constants.APIServerNetworkProxyAdminPortKey], - }) - if err != nil { - return fmt.Errorf("error when parsing virtual cluster apiserver svc template: %w", err) - } - klog.V(4).InfoS("[anp] apply anp server svc", "anp sever svc deploy", apiserverServiceBytes) - - apiserverSvc := &v1.Service{} - if err := yaml.Unmarshal([]byte(apiserverServiceBytes), apiserverSvc); err != nil { - return fmt.Errorf("error when decoding virtual cluster apiserver svc: %w", err) - } - - if err := util.CreateOrUpdateService(client, apiserverSvc); err != nil { - return fmt.Errorf("error when creating svc for %s, err: %w", apiserverSvc.Name, err) - } - } - return nil -} - -func installAnpAgent(data InitData) error { - client := data.RemoteClient() - name := data.GetName() - namespace := data.GetNamespace() - portMap := data.HostPortMap() - kubeNestOpt := data.KubeNestOpt() - anpAgentManifestBytes, vcClient, err2 := getAnpAgentManifest(client, name, namespace, portMap, kubeNestOpt, data.VirtualCluster()) - if err2 != nil { - return err2 - } - actionFunc := func(ctx context.Context, c dynamic.Interface, u *unstructured.Unstructured) error { - // create the object - return apiclient.TryRunCommand(func() error { - return util.ReplaceObject(vcClient, u) - }, apiclient.DefaultRetryCount) - } - return util.ForEachObjectInYAML(context.TODO(), vcClient, []byte(anpAgentManifestBytes), "", actionFunc) -} - -func getAnpAgentManifest(client clientset.Interface, name string, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) (string, dynamic.Interface, error) { - imageRepository, imageVersion := util.GetImageMessage() - // get apiServer hostIp - var proxyServerHost []string - var err error - if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort { - proxyServerHost, err = getDeploymentHostIPs(client, namespace, util.GetAPIServerName(name)) - } else { - proxyServerHost, err = getDeploymentPodIPs(client, namespace, util.GetAPIServerName(name)) - } - - if err != nil { - klog.Warningf("Failed to get apiserver hostIp, err: %v", err) - // ignore if can't get the hostIp when uninstall the deployment - proxyServerHost = []string{"127.0.0.1"} - } - - anpAgentManifeattBytes, err := util.ParseTemplate(apiserver.AnpAgentManifest, struct { - ImageRepository string - Version string - AgentPort int32 - ProxyServerHost []string - AnpMode string - AgentCertName string - }{ - ImageRepository: imageRepository, - Version: imageVersion, - AgentPort: portMap[constants.APIServerNetworkProxyAgentPortKey], - ProxyServerHost: proxyServerHost, - AnpMode: kubeNestConfiguration.KubeInKubeConfig.AnpMode, - AgentCertName: util.GetCertName(name), - }) - if err != nil 
{ - return "", nil, fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err) - } - klog.V(4).InfoS("[anp] apply anp agent", "agent manifest", anpAgentManifeattBytes) - vcClient, err := getVcDynamicClient(client, name, namespace) - if err != nil { - return "", nil, fmt.Errorf("error when get vcClient, err: %v", err) - } - return anpAgentManifeattBytes, vcClient, nil -} - -// getDeploymentPodIPs 获取指定 Deployment 的所有 Pod IP 地址 -func getDeploymentPodIPs(clientset clientset.Interface, namespace, deploymentName string) ([]string, error) { - deployment, err := clientset.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("error getting deployment: %v", err) - } - - labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector) - listOptions := metav1.ListOptions{LabelSelector: labelSelector} - - pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), listOptions) - if err != nil { - return nil, fmt.Errorf("error listing pods: %v", err) - } - - var podIPs []string - for _, pod := range pods.Items { - if pod.Status.Phase == v1.PodRunning { - podIPs = append(podIPs, pod.Status.PodIP) - } - } - - return podIPs, nil -} - -func getDeploymentHostIPs(clientset clientset.Interface, namespace, deploymentName string) ([]string, error) { - deployment, err := clientset.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("error getting deployment: %v", err) - } - - labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector) - listOptions := metav1.ListOptions{LabelSelector: labelSelector} - - pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), listOptions) - if err != nil { - return nil, fmt.Errorf("error listing pods: %v", err) - } - - var podIPs []string - for _, pod := range pods.Items { - if pod.Status.Phase == v1.PodRunning { - podIPs = append(podIPs, pod.Status.HostIP) - } - } - - return podIPs, nil -} - -func getVcDynamicClient(client clientset.Interface, name, namespace string) (dynamic.Interface, error) { - secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), - util.GetAdminConfigSecretName(name), metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return nil, err - } - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - return dynamicClient, nil -} -func GetVcClientset(client clientset.Interface, name, namespace string) (clientset.Interface, error) { - secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), - util.GetAdminConfigSecretName(name), metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return nil, err - } - - vcClient, err := clientset.NewForConfig(config) - if err != nil { - return nil, err - } - - return vcClient, nil -} - -func runUploadProxyAgentCert(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("upload proxy agent cert task invoked with an invalid data struct") - } - name, namespace := data.GetName(), data.GetNamespace() - certList := data.CertList() - certsData := make(map[string][]byte, 
len(certList)) - for _, c := range certList { - // only upload the apiserver certs - if strings.Contains(c.KeyName(), "apiserver") { - certsData[c.KeyName()] = c.KeyData() - certsData[c.CertName()] = c.CertData() - } - } - vcClient, err := GetVcClientset(data.RemoteClient(), name, namespace) - if err != nil { - return fmt.Errorf("failed to get virtual cluster client, err: %w", err) - } - - err = apiclient.TryRunCommand(func() error { - return createOrUpdateSecret(vcClient, &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.GetCertName(data.GetName()), - Namespace: "kube-system", - Labels: VirtualClusterControllerLabel, - }, - Data: certsData, - }) - }, apiclient.DefaultRetryCount) - if err != nil { - return fmt.Errorf("failed to upload agent cert to tenant, err: %w", err) - } - - klog.V(2).InfoS("[Upload-ProxyAgentCert] Successfully uploaded virtual cluster agent certs to secret", "virtual cluster", klog.KObj(data)) - return nil -} - -func runCheckVirtualClusterAnp(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("check-VirtualClusterAnp task invoked with an invalid data struct") - } - - checker := apiclient.NewVirtualClusterChecker(data.RemoteClient(), constants.ComponentBeReadyTimeout) - - err := checker.WaitForSomePods(virtualClusterAnpLabels.String(), data.GetNamespace(), 1) - if err != nil { - return fmt.Errorf("timed out waiting for the virtual cluster anp to be ready, err: %w", err) - } - - klog.V(2).InfoS("[check-VirtualClusterAnp] the virtual cluster anp is ready", "virtual cluster", klog.KObj(data)) - return nil -} diff --git a/pkg/kubenest/tasks/apiserver.go b/pkg/kubenest/tasks/apiserver.go deleted file mode 100644 index 48e02dd56..000000000 --- a/pkg/kubenest/tasks/apiserver.go +++ /dev/null @@ -1,113 +0,0 @@ -package tasks - -import ( - "fmt" - - "github.com/pkg/errors" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -func NewVirtualClusterApiserverTask() workflow.Task { - return workflow.Task{ - Name: "apiserver", - Run: runApiserver, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "deploy-apiserver", - Run: runVirtualClusterAPIServer, - }, - { - Name: "check-apiserver", - Run: runCheckVirtualClusterAPIServer, - }, - }, - } -} - -func runApiserver(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("apiserver task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[apiserver] Running apiserver task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runVirtualClusterAPIServer(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster apiserver task invoked with an invalid data struct") - } - - err := controlplane.EnsureVirtualClusterAPIServer( - data.RemoteClient(), - data.GetName(), - data.GetNamespace(), - data.HostPortMap(), - data.KubeNestOpt(), - data.VirtualCluster(), - ) - if err != nil { - return fmt.Errorf("failed to install virtual cluster apiserver component, err: %w", err) - } - - klog.V(2).InfoS("[VirtualClusterApiserver] Successfully installed virtual cluster apiserver component", "virtual cluster", klog.KObj(data)) - return nil -} - -func runCheckVirtualClusterAPIServer(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("check-VirtualClusterAPIServer task invoked with
an invalid data struct") - } - - checker := apiclient.NewVirtualClusterChecker(data.RemoteClient(), constants.ComponentBeReadyTimeout) - - err := checker.WaitForSomePods(virtualClusterApiserverLabels.String(), data.GetNamespace(), 1) - if err != nil { - return fmt.Errorf("checking for virtual cluster apiserver to ready timeout, err: %w", err) - } - - klog.V(2).InfoS("[check-VirtualClusterAPIServer] the virtual cluster apiserver is ready", "virtual cluster", klog.KObj(data)) - return nil -} - -func UninstallVirtualClusterApiserverTask() workflow.Task { - return workflow.Task{ - Name: "apiserver", - Run: runApiserver, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: constants.APIServer, - Run: uninstallVirtualClusterAPIServer, - }, - }, - } -} - -func uninstallVirtualClusterAPIServer(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster apiserver task invoked with an invalid data struct") - } - - err := controlplane.DeleteVirtualClusterAPIServer( - data.RemoteClient(), - data.GetName(), - data.GetNamespace(), - ) - if err != nil { - return fmt.Errorf("failed to install virtual cluster apiserver component, err: %w", err) - } - - klog.V(2).InfoS("[VirtualClusterApiserver] Successfully uninstalled virtual cluster apiserver component", "virtual cluster", klog.KObj(data)) - return nil -} diff --git a/pkg/kubenest/tasks/cert.go b/pkg/kubenest/tasks/cert.go deleted file mode 100644 index 74abe21d8..000000000 --- a/pkg/kubenest/tasks/cert.go +++ /dev/null @@ -1,148 +0,0 @@ -package tasks - -import ( - "context" - "errors" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/kubenest/util/cert" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -func NewCertTask() workflow.Task { - return workflow.Task{ - Name: "Certs", - Run: runCerts, - Skip: skipCerts, - RunSubTasks: true, - Tasks: newCertSubTasks(), - } -} - -func runCerts(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("certs task invoked with an invalid data struct") - } - klog.V(4).InfoS("[certs] Running certs task", "virtual cluster", klog.KObj(data)) - return nil -} - -func skipCerts(d workflow.RunData) (bool, error) { - data, ok := d.(InitData) - if !ok { - return false, errors.New("certs task invoked with an invalid data struct") - } - - secretName := util.GetCertName(data.GetName()) - secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), secretName, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - if err := data.LoadCertFromSecret(secret); err != nil { - return false, err - } - - klog.V(4).InfoS("[certs] Successfully loaded certs form secret", "secret", secret.Name, "virtual cluster", klog.KObj(data)) - klog.V(2).InfoS("[certs] Skip certs task, found previous certificates in secret", "virtual cluster", klog.KObj(data)) - return true, nil -} - -func newCertSubTasks() []workflow.Task { - var subTasks []workflow.Task - caCert := map[string]*cert.CertConfig{} - - for _, cert := range cert.GetDefaultCertList() { - var task workflow.Task - if cert.CAName == "" { - task = workflow.Task{Name: cert.Name, Run: runCATask(cert)} - caCert[cert.Name] = cert - } else { - task = workflow.Task{Name: cert.Name, Run: runCertTask(cert, caCert[cert.CAName])} - } - - subTasks = append(subTasks, task) - } - - return subTasks -} - -func runCertTask(cc, caCert *cert.CertConfig) func(d 
workflow.RunData) error { - return func(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return fmt.Errorf("certs task invoked with an invalid data struct") - } - - if caCert == nil { - return fmt.Errorf("unexpected empty ca cert for %s", cc.Name) - } - - if cc.CAName != caCert.Name { - return fmt.Errorf("cert %s expects CA %s, but CA %s was provided", cc.Name, cc.CAName, caCert.Name) - } - - if err := mutateCertConfig(data, cc); err != nil { - return fmt.Errorf("error when mutating cert altNames for %s, err: %w", cc.Name, err) - } - - caCert := data.GetCert(cc.CAName) - cert, err := cert.CreateCertAndKeyFilesWithCA(cc, caCert.CertData(), caCert.KeyData()) - if err != nil { - return err - } - - data.AddCert(cert) - - klog.V(2).InfoS("[certs] Successfully generated certificate", "certName", cc.Name, "caName", cc.CAName) - return nil - } -} - -func runCATask(kc *cert.CertConfig) func(d workflow.RunData) error { - return func(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("certs task invoked with an invalid data struct") - } - - if kc.CAName != "" { - return fmt.Errorf("this function should only be used for CAs, but cert %s has CA %s", kc.Name, kc.CAName) - } - klog.V(4).InfoS("[certs] Creating a new certificate authority", "certName", kc.Name) - - cert, err := cert.NewCertificateAuthority(kc) - if err != nil { - return err - } - - klog.V(2).InfoS("[certs] Successfully generated ca certificate", "certName", kc.Name) - - data.AddCert(cert) - return nil - } -} - -func mutateCertConfig(data InitData, cc *cert.CertConfig) error { - if cc.AltNamesMutatorFunc != nil { - err := cc.AltNamesMutatorFunc(&cert.AltNamesMutatorConfig{ - Name: data.GetName(), - Namespace: data.GetNamespace(), - ControlplaneAddr: data.ControlplaneAddress(), - ClusterIPs: data.ServiceClusterIP(), - ExternalIP: data.ExternalIP(), - ExternalIPs: data.ExternalIPs(), - VipMap: data.VipMap(), - }, cc) - if err != nil { - return err - } - } - - return nil -} diff --git a/pkg/kubenest/tasks/check.go b/pkg/kubenest/tasks/check.go deleted file mode 100644 index d951984d6..000000000 --- a/pkg/kubenest/tasks/check.go +++ /dev/null @@ -1,90 +0,0 @@ -package tasks - -import ( - "errors" - "fmt" - - "k8s.io/apimachinery/pkg/labels" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -var ( - kubeControllerManagerLabels = labels.Set{"virtualCluster-app": constants.KubeControllerManager} - virtualClusterManagerLabels = labels.Set{"virtualCluster-app": constants.VirtualClusterScheduler} - virtualClusterApiserverLabels = labels.Set{"virtualCluster-app": constants.APIServer} - virtualClusterEtcdLabels = labels.Set{"virtualCluster-app": constants.Etcd} - virtualClusterAnpLabels = labels.Set{"virtualCluster-anp": constants.APIServerAnp} -) - -func NewCheckApiserverHealthTask() workflow.Task { - return workflow.Task{ - Name: "check-apiserver-health", - Run: runCheckApiserver, - } -} - -func NewCheckControlPlaneTask() workflow.Task { - return workflow.Task{ - Name: "check-controlPlane-health", - Run: runCheckControlPlane, - RunSubTasks: true, - Tasks: []workflow.Task{ - newCheckControlPlaneSubTask("KubeControllerManager", kubeControllerManagerLabels), - newCheckControlPlaneSubTask("VirtualClusterScheduler", virtualClusterManagerLabels), - }, - } -} - -func newCheckControlPlaneSubTask(component string, ls labels.Set) workflow.Task { - return workflow.Task{ - Name:
component, - Run: runCheckControlPlaneSubTask(component, ls), - } -} - -func runCheckApiserver(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return fmt.Errorf("check-apiserver-health task invoked with an invalid data struct") - } - klog.V(4).InfoS("[check-apiserver-health] Running task", "virtual cluster", klog.KObj(data)) - - checker := apiclient.NewVirtualClusterChecker(data.RemoteClient(), constants.ComponentBeReadyTimeout) - - if err := apiclient.TryRunCommand(checker.WaitForAPI, 3); err != nil { - return fmt.Errorf("the virtual cluster apiserver is unhealthy, err: %w", err) - } - klog.V(2).InfoS("[check-apiserver-health] the etcd and virtualCluster-apiserver are healthy", "virtual cluster", klog.KObj(data)) - return nil -} - -func runCheckControlPlane(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("check-controlPlane task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[check-controlPlane] Running wait-controlPlane task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runCheckControlPlaneSubTask(component string, ls labels.Set) func(r workflow.RunData) error { - return func(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("check-controlPlane task invoked with an invalid data struct") - } - - checker := apiclient.NewVirtualClusterChecker(data.RemoteClient(), constants.ComponentBeReadyTimeout) - if err := checker.WaitForSomePods(ls.String(), data.GetNamespace(), 2); err != nil { - return fmt.Errorf("timed out waiting for %s to be ready, err: %w", component, err) - } - - klog.V(2).InfoS("[check-controlPlane] component status is ready", "component", component, "virtual cluster", klog.KObj(data)) - return nil - } -} diff --git a/pkg/kubenest/tasks/component.go b/pkg/kubenest/tasks/component.go deleted file mode 100644 index 9f04a5709..000000000 --- a/pkg/kubenest/tasks/component.go +++ /dev/null @@ -1,106 +0,0 @@ -package tasks - -import ( - "errors" - "fmt" - - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -func NewComponentTask() workflow.Task { - return workflow.Task{ - Name: "components", - Run: runComponents, - RunSubTasks: true, - Tasks: []workflow.Task{ - newComponentSubTask(constants.KubeControllerManagerComponent), - newComponentSubTask(constants.VirtualClusterSchedulerComponent), - }, - } -} - -func runComponents(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("components task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[components] Running components task", "virtual cluster", klog.KObj(data)) - return nil -} - -func newComponentSubTask(component string) workflow.Task { - return workflow.Task{ - Name: component, - Run: runComponentSubTask(component), - } -} - -func runComponentSubTask(component string) func(r workflow.RunData) error { - return func(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("components task invoked with an invalid data struct") - } - - kubeNestOpt := data.KubeNestOpt() - - err := controlplane.EnsureControlPlaneComponent( - component, - data.GetName(), - data.GetNamespace(), - data.RemoteClient(), - kubeNestOpt.KubeInKubeConfig.ClusterCIDR, - ) - if err != nil { - return fmt.Errorf("failed to apply component %s, err: %w", component, err) - } - - klog.V(2).InfoS("[components] Successfully applied
component", "component", component, "virtual cluster", klog.KObj(data)) - return nil - } -} - -func UninstallComponentTask() workflow.Task { - return workflow.Task{ - Name: "components", - Run: runComponents, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: constants.KubeControllerManagerComponent, - Run: uninstallComponentSubTask(constants.KubeControllerManagerComponent), - }, - { - Name: constants.VirtualClusterSchedulerComponent, - Run: uninstallComponentSubTask(constants.VirtualClusterSchedulerComponent), - }, - }, - } -} - -func uninstallComponentSubTask(component string) func(r workflow.RunData) error { - return func(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("components task invoked with an invalid data struct") - } - - err := controlplane.DeleteControlPlaneComponent( - component, - data.GetName(), - data.GetNamespace(), - data.RemoteClient(), - ) - if err != nil { - return fmt.Errorf("failed to uninstall component %s, err: %w", component, err) - } - - klog.V(2).InfoS("[components] Successfully uninstalled component", "component", component, "virtual cluster", klog.KObj(data)) - return nil - } -} diff --git a/pkg/kubenest/tasks/coredns.go b/pkg/kubenest/tasks/coredns.go deleted file mode 100644 index e181b8039..000000000 --- a/pkg/kubenest/tasks/coredns.go +++ /dev/null @@ -1,346 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - "os" - "path/filepath" - "time" - - "github.com/pkg/errors" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -func NewCoreDNSTask() workflow.Task { - return workflow.Task{ - Name: "coreDns", - Run: runCoreDNS, - Skip: skipCoreDNS, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "deploy-core-dns-in-host-cluster", - Run: runCoreDNSHostTask, - }, - { - Name: "check-core-dns", - Run: runCheckCoreDNSTask, - }, - { - Name: "deploy-core-dns-service-in-virtual-cluster", - Run: runCoreDNSVirtualTask, - }, - }, - } -} - -func skipCoreDNS(d workflow.RunData) (bool, error) { - data, ok := d.(InitData) - if !ok { - return false, errors.New("coreDns task invoked with an invalid data struct") - } - - vc := data.VirtualCluster() - if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.UseTenantDNS { - return true, nil - } - return false, nil -} - -func runCoreDNS(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("coreDns task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[coreDns] Running coreDns task", "virtual cluster", klog.KObj(data)) - return nil -} - -func UninstallCoreDNSTask() workflow.Task { - return workflow.Task{ - Name: "coredns", - Run: runCoreDNS, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "remove-core-dns-in-host-cluster", - Run: uninstallCorednsHostTask, - }, - }, - } -} - -func getCoreDNSHostComponentsConfig(client clientset.Interface, keyName string) ([]ComponentConfig, error) { - cm, err := client.CoreV1().ConfigMaps(constants.KosmosNs).Get(context.Background(), 
constants.ManifestComponentsConfigMap, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, nil - } - return nil, err - } - - yamlData, ok := cm.Data[keyName] - if !ok { - return nil, errors.Errorf("manifest components config %s not found in configmap %s", keyName, constants.ManifestComponentsConfigMap) - } - - var components []ComponentConfig - err = yaml.Unmarshal([]byte(yamlData), &components) - if err != nil { - return nil, errors.Wrap(err, "Unmarshal manifests component config error") - } - return components, nil -} - -// in host -func runCoreDNSHostTask(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("deploy-core-dns-in-host-cluster task invoked with an invalid data struct") - } - - dynamicClient := data.DynamicClient() - - components, err := getCoreDNSHostComponentsConfig(data.RemoteClient(), constants.HostCoreDnsComponents) - if err != nil { - return err - } - - imageRepository, _ := util.GetImageMessage() - - for _, component := range components { - klog.V(2).Infof("Deploy component %s", component.Name) - - templatedMapping := map[string]interface{}{ - "Namespace": data.GetNamespace(), - "Name": data.GetName(), - "ImageRepository": imageRepository, - } - for k, v := range data.PluginOptions() { - templatedMapping[k] = v - } - err = applyYMLTemplate(dynamicClient, component.Path, templatedMapping) - if err != nil { - return err - } - } - return nil -} - -func uninstallCorednsHostTask(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("remove-core-dns-in-host-cluster task invoked with an invalid data struct") - } - - dynamicClient := data.DynamicClient() - - components, err := getCoreDNSHostComponentsConfig(data.RemoteClient(), constants.HostCoreDnsComponents) - if err != nil { - return err - } - - imageRepository, _ := util.GetImageMessage() - - for _, component := range components { - klog.V(2).Infof("Delete component %s", component.Name) - - templatedMapping := map[string]interface{}{ - "Namespace": data.GetNamespace(), - "Name": data.GetName(), - "ImageRepository": imageRepository, - } - err = deleteYMLTemplate(dynamicClient, component.Path, templatedMapping) - if err != nil { - return err - } - } - return nil -} - -// in host -func runCheckCoreDNSTask(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("check-core-dns task invoked with an invalid data struct") - } - ctx := context.TODO() - - waitCtx, cancel := context.WithTimeout(ctx, 40*time.Second) - isReady := false - - wait.UntilWithContext(waitCtx, func(ctx context.Context) { - _, err := data.RemoteClient().CoreV1().Services(data.GetNamespace()).Get(context.TODO(), constants.KubeDNSSVCName, metav1.GetOptions{}) - if err == nil { - // TODO: check endpoints - isReady = true - cancel() - } - }, 10*time.Second) // Interval time - - if isReady { - return nil - } - - return fmt.Errorf("kube-dns is not ready") -} - -func runCoreDNSVirtualTask(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("deploy-core-dns-service-in-virtual-cluster task invoked with an invalid data struct") - } - - secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return err - } - dynamicClient, err :=
dynamic.NewForConfig(config) - if err != nil { - return err - } - - components, err := getCoreDNSHostComponentsConfig(data.RemoteClient(), constants.VirtualCoreDNSComponents) - if err != nil { - return err - } - - kubesvc, err := data.RemoteClient().CoreV1().Services(data.GetNamespace()).Get(context.TODO(), constants.KubeDNSSVCName, metav1.GetOptions{}) - if err != nil { - return err - } - - DNSPort := int32(0) - DNSTCPPort := int32(0) - MetricsPort := int32(0) - - for _, port := range kubesvc.Spec.Ports { - if port.Name == "dns" { - DNSPort = port.NodePort - } - if port.Name == "dns-tcp" { - DNSTCPPort = port.NodePort - } - if port.Name == "metrics" { - MetricsPort = port.NodePort - } - } - HostNodeAddress := os.Getenv("EXECTOR_HOST_MASTER_NODE_IP") - if len(HostNodeAddress) == 0 { - return fmt.Errorf("get master node ip from env failed") - } - - for _, component := range components { - klog.V(2).Infof("Deploy component %s", component.Name) - - templatedMapping := map[string]interface{}{ - "Namespace": data.GetNamespace(), - "Name": data.GetName(), - "DNSPort": DNSPort, - "DNSTCPPort": DNSTCPPort, - "MetricsPort": MetricsPort, - "HostNodeAddress": HostNodeAddress, - } - for k, v := range data.PluginOptions() { - templatedMapping[k] = v - } - err = applyYMLTemplate(dynamicClient, component.Path, templatedMapping) - if err != nil { - return err - } - } - return nil -} - -// nolint:dupl -func applyYMLTemplate(dynamicClient dynamic.Interface, manifestGlob string, templateMapping map[string]interface{}) error { - manifests, err := filepath.Glob(manifestGlob) - klog.V(2).Infof("Component Manifests %s", manifestGlob) - if err != nil { - return err - } - if manifests == nil { - return errors.Errorf("No matching file for pattern %v", manifestGlob) - } - for _, manifest := range manifests { - klog.V(2).Infof("Applying %s", manifest) - var obj unstructured.Unstructured - bytesData, err := os.ReadFile(manifest) - if err != nil { - return errors.Wrapf(err, "Read file %s error", manifest) - } - - templateBytes, err := util.ParseTemplate(string(bytesData), templateMapping) - if err != nil { - return errors.Wrapf(err, "Parse template %s error", manifest) - } - - err = yaml.Unmarshal([]byte(templateBytes), &obj) - if err != nil { - return errors.Wrapf(err, "Unmarshal manifest bytes data error") - } - - err = apiclient.TryRunCommand(func() error { - return util.ApplyObject(dynamicClient, &obj) - }, 3) - if err != nil { - return errors.Wrapf(err, "Create object error") - } - } - return nil -} - -// nolint:dupl -func deleteYMLTemplate(dynamicClient dynamic.Interface, manifestGlob string, templateMapping map[string]interface{}) error { - manifests, err := filepath.Glob(manifestGlob) - klog.V(2).Infof("Component Manifests %s", manifestGlob) - if err != nil { - return err - } - if manifests == nil { - return errors.Errorf("No matching file for pattern %v", manifestGlob) - } - for _, manifest := range manifests { - klog.V(2).Infof("Deleting %s", manifest) - var obj unstructured.Unstructured - bytesData, err := os.ReadFile(manifest) - if err != nil { - return errors.Wrapf(err, "Read file %s error", manifest) - } - - templateBytes, err := util.ParseTemplate(string(bytesData), templateMapping) - if err != nil { - return errors.Wrapf(err, "Parse template %s error", manifest) - } - - err = yaml.Unmarshal([]byte(templateBytes), &obj) - if err != nil { - return errors.Wrapf(err, "Unmarshal manifest bytes data error") - } - - err = util.DeleteObject(dynamicClient, obj.GetNamespace(), obj.GetName(), &obj) - if err != nil 
{ - return errors.Wrapf(err, "Delete object error") - } - } - return nil -} diff --git a/pkg/kubenest/tasks/data.go b/pkg/kubenest/tasks/data.go deleted file mode 100644 index 814899b40..000000000 --- a/pkg/kubenest/tasks/data.go +++ /dev/null @@ -1,30 +0,0 @@ -package tasks - -import ( - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" - "github.com/kosmos.io/kosmos/pkg/kubenest/util/cert" -) - -type InitData interface { - cert.CertStore - GetName() string - GetNamespace() string - ControlplaneAddress() string - ServiceClusterIP() []string - RemoteClient() clientset.Interface - KosmosClient() versioned.Interface - DataDir() string - VirtualCluster() *v1alpha1.VirtualCluster - ExternalIP() string - ExternalIPs() []string - HostPort() int32 - HostPortMap() map[string]int32 - VipMap() map[string]string - DynamicClient() *dynamic.DynamicClient - KubeNestOpt() *v1alpha1.KubeNestConfiguration - PluginOptions() map[string]string -} diff --git a/pkg/kubenest/tasks/endpoint.go b/pkg/kubenest/tasks/endpoint.go deleted file mode 100644 index be78866a7..000000000 --- a/pkg/kubenest/tasks/endpoint.go +++ /dev/null @@ -1,76 +0,0 @@ -package tasks - -import ( - "context" - - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/common" - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -func NewEndPointTask() workflow.Task { - return workflow.Task{ - Name: "endpoint", - Run: runEndpoint, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "deploy-endpoint-in-virtual-cluster", - Run: runEndPointInVirtualClusterTask, - }, - }, - } -} - -func runEndpoint(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("endPoint task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[endPoint] Running endPoint task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runEndPointInVirtualClusterTask(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster endpoint task invoked with an invalid data struct") - } - - secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return err - } - - kubeClient, err := kubernetes.NewForConfig(config) - if err != nil { - return err - } - - apiServerExternalResource := common.APIServerExternalResource{ - Namespace: data.GetNamespace(), - Name: data.GetName(), - Vc: data.VirtualCluster(), - RootClientSet: data.RemoteClient(), - } - - err = controlplane.EnsureAPIServerExternalEndPoint(kubeClient, apiServerExternalResource) - if err != nil { - return err - } - return nil -} diff --git a/pkg/kubenest/tasks/etcd.go b/pkg/kubenest/tasks/etcd.go deleted file mode 100644 index 38593c04f..000000000 --- a/pkg/kubenest/tasks/etcd.go +++ /dev/null @@ -1,173 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - "time" - - 
"github.com/pkg/errors" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -var ( - etcdLabels = labels.Set{constants.Label: constants.Etcd} -) - -func NewEtcdTask() workflow.Task { - return workflow.Task{ - Name: "Etcd", - Run: runEtcd, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "deploy-etcd", - Run: runDeployEtcd, - }, - { - Name: "check-etcd", - Run: runCheckEtcd, - }, - }, - } -} - -func runEtcd(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("etcd task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[etcd] Running etcd task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runDeployEtcd(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("deploy-etcd task invoked with an invalid data struct") - } - - err := controlplane.EnsureVirtualClusterEtcd(data.RemoteClient(), data.GetName(), data.GetNamespace(), data.KubeNestOpt(), data.VirtualCluster()) - if err != nil { - return fmt.Errorf("failed to install etcd component, err: %w", err) - } - - klog.V(2).InfoS("[deploy-etcd] Successfully installed etcd component", "virtual cluster", klog.KObj(data)) - return nil -} - -func runCheckEtcd(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("check-etcd task invoked with an invalid data struct") - } - - checker := apiclient.NewVirtualClusterChecker(data.RemoteClient(), constants.ComponentBeReadyTimeout) - - if err := checker.WaitForSomePods(etcdLabels.String(), data.GetNamespace(), 1); err != nil { - return fmt.Errorf("checking for virtual cluster etcd to ready timeout, err: %w", err) - } - - klog.V(2).InfoS("[check-etcd] the etcd pods is ready", "virtual cluster", klog.KObj(data)) - return nil -} - -func UninstallEtcdTask() workflow.Task { - return workflow.Task{ - Name: "Etcd", - Run: runEtcd, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: constants.Etcd, - Run: UninstallEtcd, - }, - }, - } -} - -func DeleteEtcdPvcTask() workflow.Task { - return workflow.Task{ - Name: "Etcd", - Run: runEtcd, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: constants.Etcd, - Run: deleteEtcdPvc, - }, - { - Name: "check-pvc-deleted", - Run: checkPvcDeleted, - }, - }, - } -} - -func UninstallEtcd(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("destroy-etcd task invoked with an invalid data struct") - } - - err := controlplane.DeleteVirtualClusterEtcd(data.RemoteClient(), data.GetName(), data.GetNamespace()) - if err != nil { - return fmt.Errorf("failed to uninstall etcd component, err: %w", err) - } - - klog.V(2).InfoS("[uninstall-etcd] Successfully uninstalled etcd component", "virtual cluster", klog.KObj(data)) - return nil -} - -func deleteEtcdPvc(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("destroy-etcd task invoked with an invalid data struct") - } - - for i := 0; i < constants.EtcdReplicas; i++ { - pvc := fmt.Sprintf("%s-%s-etcd-%d", constants.EtcdDataVolumeName, data.GetName(), i) - klog.V(2).Infof("Delete pvc %s/%s", pvc, data.GetNamespace()) - err := 
data.RemoteClient().CoreV1().PersistentVolumeClaims(data.GetNamespace()).Delete(context.TODO(), pvc, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - continue - } - return errors.Wrapf(err, "Delete pvc %s error", pvc) - } - } - return nil -} - -func checkPvcDeleted(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("destroy-etcd task invoked with an invalid data struct") - } - - klog.V(2).Infof("Check if %s etcd pvc deleted", data.GetName()) - err := wait.PollImmediate(5*time.Second, constants.ComponentBeDeletedTimeout, func() (done bool, err error) { - pvcList, err := data.RemoteClient().CoreV1().PersistentVolumeClaims(data.GetNamespace()).List(context.TODO(), metav1.ListOptions{LabelSelector: virtualClusterEtcdLabels.String()}) - if err != nil { - return true, errors.Wrap(err, "List pvc error") - } - if len(pvcList.Items) == 0 { - return true, nil - } - klog.V(2).Infof("Waiting for pvc deleted. current exist num: %d", len(pvcList.Items)) - return false, nil - }) - if err != nil { - return errors.Wrap(err, "Failed to delete etcd pvc") - } - return nil -} diff --git a/pkg/kubenest/tasks/manifests_components.go b/pkg/kubenest/tasks/manifests_components.go deleted file mode 100644 index 4e8b3f7fc..000000000 --- a/pkg/kubenest/tasks/manifests_components.go +++ /dev/null @@ -1,251 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -type ComponentConfig struct { - Name string `json:"name" yaml:"name"` - Path string `json:"path" yaml:"path"` -} - -type SkipComponentCondition struct { - Condition bool - ComponentName string -} - -func NewComponentsFromManifestsTask() workflow.Task { - return workflow.Task{ - Name: "manifests-components", - Run: runComponentsFromManifests, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "deploy-manifests-components", - Run: applyComponentsManifests, - }, - }, - } -} - -func runComponentsFromManifests(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("manifests-components task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[manifests-components] Running manifests-components task", "virtual cluster", klog.KObj(data)) - return nil -} - -func getSkipComponentsForVirtualCluster(condition []*SkipComponentCondition) map[string]bool { - skipComponents := map[string]bool{} - for _, c := range condition { - skipComponents[c.ComponentName] = c.Condition - } - return skipComponents -} - -func applyComponentsManifests(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster manifests-components task invoked with an invalid data struct") - } - keepalivedReplicas := constants.VipKeepAlivedReplicas - secret, err :=
data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return err - } - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return err - } - - components, err := getComponentsConfig(data.RemoteClient()) - if err != nil { - return err - } - - templatedMapping := make(map[string]interface{}, 2) - templatedMapping["KUBE_PROXY_KUBECONFIG"] = string(secret.Data[constants.KubeConfig]) - imageRepository, _ := util.GetImageMessage() - templatedMapping["ImageRepository"] = imageRepository - - templatedMapping["PillarLocalDNS"] = constants.NodeLocalDNSIp - templatedMapping["PillarDNSDomain"] = constants.NodeLocalDNSClusterDomain - templatedMapping["PillarDNSServer"] = "" - - for k, v := range data.PluginOptions() { - templatedMapping[k] = v - } - keepalivedEnable := data.VipMap() != nil && data.VipMap()[constants.VcVipStatusKey] != "" - if keepalivedEnable { - templatedMapping["Vip"] = data.VipMap()[constants.VcVipStatusKey] - // use min replicas - nodeCount := data.VirtualCluster().Spec.PromotePolicies[0].NodeCount - if nodeCount < constants.VipKeepAlivedReplicas { - keepalivedReplicas = int(nodeCount) - } - - templatedMapping["KeepalivedReplicas"] = keepalivedReplicas - } - - UseTenantDNS := data.VirtualCluster().Spec.KubeInKubeConfig != nil && data.VirtualCluster().Spec.KubeInKubeConfig.UseTenantDNS - UseNodeLocalDNS := data.VirtualCluster().Spec.KubeInKubeConfig != nil && data.VirtualCluster().Spec.KubeInKubeConfig.UseNodeLocalDNS - - skipComponents := getSkipComponentsForVirtualCluster([]*SkipComponentCondition{ - { - // skip the tenant coredns component unless tenant dns is enabled - Condition: !UseTenantDNS, - ComponentName: constants.TenantCoreDNSComponentName, - }, { - // skip keepalived component if vip is not enabled - Condition: !keepalivedEnable, - ComponentName: constants.VipKeepalivedComponentName, - }, - { - // skip nodelocaldns component if nodelocaldns is not enabled - Condition: !UseNodeLocalDNS, - ComponentName: constants.NodeLocalDNSComponentName, - }, - }) - - for _, component := range components { - klog.V(2).Infof("Deploy component %s", component.Name) - if v, ok := skipComponents[component.Name]; ok && v { - klog.V(2).Infof("Deploy component %s skipped", component.Name) - continue - } - if component.Name == constants.NodeLocalDNSComponentName { - kubeDNSIP, err := getKubeDNSClusterIP(config) - if err != nil { - return errors.Wrap(err, "Failed to get kube-dns ClusterIP") - } - klog.Infof("kube-dns CLUSTER-IP: %s", kubeDNSIP) - templatedMapping["PillarClusterDNS"] = kubeDNSIP - } - err = applyTemplatedManifests(component.Name, dynamicClient, component.Path, templatedMapping) - if err != nil { - return err - } - } - - klog.V(2).InfoS("[manifests-components] Successfully installed virtual cluster manifests-components", "virtual cluster", klog.KObj(data)) - return nil -} - -func getComponentsConfig(client clientset.Interface) ([]ComponentConfig, error) { - cm, err := client.CoreV1().ConfigMaps(constants.KosmosNs).Get(context.Background(), constants.ManifestComponentsConfigMap, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, nil - } - return nil, err - } - - yamlData, ok := cm.Data["components"] - if !ok { - return nil, errors.Errorf("Read
manifests components config error") - } - - var components []ComponentConfig - err = yaml.Unmarshal([]byte(yamlData), &components) - if err != nil { - return nil, errors.Wrap(err, "Unmarshal manifests component config error") - } - return components, nil -} - -func applyTemplatedManifests(component string, dynamicClient dynamic.Interface, manifestGlob string, templateMapping map[string]interface{}) error { - manifests, err := filepath.Glob(manifestGlob) - klog.V(2).Infof("Component Manifests %s", manifestGlob) - if err != nil { - return err - } - if manifests == nil { - return errors.Errorf("No matching file for pattern %v", manifestGlob) - } - for _, manifest := range manifests { - klog.V(2).Infof("Applying %s", manifest) - var obj unstructured.Unstructured - bytesData, err := os.ReadFile(manifest) - if err != nil { - return errors.Wrapf(err, "Read file %s error", manifest) - } - templateData := bytesData - // template doesn't suit for prometheus rules, we deploy it directly - if component != constants.PrometheusRuleManifest { - templateString, err := util.ParseTemplate(string(bytesData), templateMapping) - if err != nil { - return errors.Wrapf(err, "Parse manifest file %s template error", manifest) - } - templateData = []byte(templateString) - } - err = yaml.Unmarshal(templateData, &obj) - if err != nil { - return errors.Wrapf(err, "Unmarshal manifest bytes data error") - } - gvk := obj.GroupVersionKind() - gvr, _ := meta.UnsafeGuessKindToResource(gvk) - if obj.GetName() == constants.KubeProxyConfigmap && gvr.Resource == "configmaps" { - cm := &corev1.ConfigMap{} - err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, cm) - if err != nil { - return errors.Wrapf(err, "Convert unstructured obj to configmap %s error", obj.GetName()) - } - cm.Data["kubeconfig.conf"] = templateMapping["KUBE_PROXY_KUBECONFIG"].(string) - res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm) - if err != nil { - return errors.Wrapf(err, "Convert configmap %s to unstructured obj error", obj.GetName()) - } - obj = unstructured.Unstructured{Object: res} - } - err = apiclient.TryRunCommand(func() error { - return util.ApplyObject(dynamicClient, &obj) - }, 3) - if err != nil { - return errors.Wrapf(err, "Create object error") - } - } - return nil -} -func getKubeDNSClusterIP(config *rest.Config) (string, error) { - client, err := clientset.NewForConfig(config) - if err != nil { - return "", fmt.Errorf("failed to create kubernetes client: %v", err) - } - - svc, err := client.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{}) - if err != nil { - return "", fmt.Errorf("failed to get kube-dns service: %v", err) - } - - return svc.Spec.ClusterIP, nil -} diff --git a/pkg/kubenest/tasks/manifests_components_test.go b/pkg/kubenest/tasks/manifests_components_test.go deleted file mode 100644 index 9f57b4e98..000000000 --- a/pkg/kubenest/tasks/manifests_components_test.go +++ /dev/null @@ -1,476 +0,0 @@ -package tasks - -import "testing" - -type ResultFlag bool - -const ( - Reserve ResultFlag = true - Skip ResultFlag = false -) - -type Want struct { - Name string - Result ResultFlag // false if skip -} - -func TestGetSkipComponentsForVirtualCluster(t *testing.T) { - tests := []struct { - name string - input []*SkipComponentCondition - want []Want - skipCount int - }{ - { - name: "test-single", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - }, - 
skipCount: 1, - }, - { - name: "test-double", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - { - Condition: true, - ComponentName: "skip-2", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - { - Name: "skip-2", - Result: Skip, - }, - }, - skipCount: 2, - }, - { - name: "test-middle", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - { - Condition: false, - ComponentName: "skip-2", - }, - { - Condition: true, - ComponentName: "skip-3", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - { - Name: "skip-2", - Result: Reserve, - }, - { - Name: "skip-3", - Result: Skip, - }, - }, - skipCount: 2, - }, - { - name: "test-all-reserve", - input: []*SkipComponentCondition{ - { - Condition: false, - ComponentName: "skip-1", - }, - { - Condition: false, - ComponentName: "skip-2", - }, - { - Condition: false, - ComponentName: "skip-3", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Reserve, - }, - { - Name: "skip-2", - Result: Reserve, - }, - { - Name: "skip-3", - Result: Reserve, - }, - }, - skipCount: 0, - }, - { - name: "test-all-skip", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - { - Condition: true, - ComponentName: "skip-2", - }, - { - Condition: true, - ComponentName: "skip-3", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - { - Name: "skip-2", - Result: Skip, - }, - { - Name: "skip-3", - Result: Skip, - }, - }, - skipCount: 3, - }, - { - name: "test-first-skip", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - { - Condition: false, - ComponentName: "skip-2", - }, - { - Condition: false, - ComponentName: "skip-3", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - { - Name: "skip-2", - Result: Reserve, - }, - { - Name: "skip-3", - Result: Reserve, - }, - }, - skipCount: 1, - }, - { - name: "test-big-data", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - { - Condition: false, - ComponentName: "skip-2", - }, - { - Condition: false, - ComponentName: "skip-3", - }, - { - Condition: false, - ComponentName: "skip-4", - }, - { - Condition: false, - ComponentName: "skip-5", - }, - { - Condition: false, - ComponentName: "skip-6", - }, - { - Condition: false, - ComponentName: "skip-7", - }, - { - Condition: false, - ComponentName: "skip-8", - }, - { - Condition: false, - ComponentName: "skip-9", - }, - { - Condition: false, - ComponentName: "skip-10", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - { - Name: "skip-2", - Result: Reserve, - }, - { - Name: "skip-3", - Result: Reserve, - }, - { - Name: "skip-4", - Result: Reserve, - }, - { - Name: "skip-5", - Result: Reserve, - }, - { - Name: "skip-6", - Result: Reserve, - }, - { - Name: "skip-7", - Result: Reserve, - }, - { - Name: "skip-8", - Result: Reserve, - }, - { - Name: "skip-9", - Result: Reserve, - }, - { - Name: "skip-10", - Result: Reserve, - }, - }, - skipCount: 1, - }, - { - name: "test-big-data", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - { - Condition: false, - ComponentName: "skip-2", - }, - { - Condition: false, - ComponentName: "skip-3", - }, - { - Condition: false, - ComponentName: "skip-4", - }, - { - Condition: false, - ComponentName: "skip-5", - }, - { - Condition: false, - ComponentName: "skip-6", - }, - { - Condition: true, - ComponentName: 
"skip-7", - }, - { - Condition: true, - ComponentName: "skip-8", - }, - { - Condition: true, - ComponentName: "skip-9", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - { - Name: "skip-2", - Result: Reserve, - }, - { - Name: "skip-3", - Result: Reserve, - }, - { - Name: "skip-4", - Result: Reserve, - }, - { - Name: "skip-5", - Result: Reserve, - }, - { - Name: "skip-6", - Result: Reserve, - }, - { - Name: "skip-7", - Result: Skip, - }, - { - Name: "skip-8", - Result: Skip, - }, - { - Name: "skip-9", - Result: Skip, - }, - }, - skipCount: 4, - }, - { - name: "test-big-data", - input: []*SkipComponentCondition{ - { - Condition: true, - ComponentName: "skip-1", - }, - { - Condition: false, - ComponentName: "skip-2", - }, - { - Condition: false, - ComponentName: "skip-3", - }, - { - Condition: false, - ComponentName: "skip-4", - }, - { - Condition: false, - ComponentName: "skip-5", - }, - { - Condition: false, - ComponentName: "skip-6", - }, - { - Condition: true, - ComponentName: "skip-7", - }, - { - Condition: true, - ComponentName: "skip-8", - }, - { - Condition: true, - ComponentName: "skip-9", - }, - { - Condition: true, - ComponentName: "skip-10", - }, - { - Condition: true, - ComponentName: "skip-11", - }, - }, - want: []Want{ - { - Name: "skip-1", - Result: Skip, - }, - { - Name: "skip-2", - Result: Reserve, - }, - { - Name: "skip-3", - Result: Reserve, - }, - { - Name: "skip-4", - Result: Reserve, - }, - { - Name: "skip-5", - Result: Reserve, - }, - { - Name: "skip-6", - Result: Reserve, - }, - { - Name: "skip-7", - Result: Skip, - }, - { - Name: "skip-8", - Result: Skip, - }, - { - Name: "skip-9", - Result: Skip, - }, - { - Name: "skip-10", - Result: Skip, - }, - { - Name: "skip-11", - Result: Skip, - }, - }, - skipCount: 6, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - skipComponents := getSkipComponentsForVirtualCluster(tt.input) - count := 0 - for _, want := range tt.want { - if v, ok := skipComponents[want.Name]; ok && v { - count++ - continue - } - if !want.Result { - t.Errorf("getSkipComponentsForVirtualCluster() name: %v, want %v", want.Name, want.Result) - } - } - if count != tt.skipCount { - t.Errorf("getSkipComponentsForVirtualCluster() name: %v, want %v", count, tt.skipCount) - } - }) - } -} diff --git a/pkg/kubenest/tasks/proxy.go b/pkg/kubenest/tasks/proxy.go deleted file mode 100644 index 07e90b06f..000000000 --- a/pkg/kubenest/tasks/proxy.go +++ /dev/null @@ -1,153 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -var ( - virtualClusterProxyLabels = labels.Set{constants.Label: constants.Proxy} -) - -func NewVirtualClusterProxyTask() workflow.Task { - return workflow.Task{ - Name: "proxy", - Run: runProxy, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "deploy-proxy", - Run: runVirtualClusterProxy, - }, - { - Name: "check-proxy", - Run: runCheckVirtualClusterProxy, - }, - }, - } -} - -func runProxy(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("proxy task invoked 
with an invalid data struct") - } - - klog.V(4).InfoS("[proxy] Running proxy task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runVirtualClusterProxy(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster proxy task invoked with an invalid data struct") - } - - kubeNestOpt := data.KubeNestOpt() - - // Get the virtual cluster's kubeconfig and store it in the kube-proxy ConfigMap - secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return err - } - client, err := clientset.NewForConfig(config) - if err != nil { - return err - } - var virtualClient clientset.Interface = client - - kubeconfigString := string(secret.Data[constants.KubeConfig]) - - err = controlplane.EnsureVirtualClusterProxy( - virtualClient, - kubeconfigString, - kubeNestOpt.KubeInKubeConfig.ClusterCIDR, - ) - if err != nil { - return fmt.Errorf("failed to install virtual cluster proxy component, err: %w", err) - } - - klog.V(2).InfoS("[VirtualClusterProxy] Successfully installed virtual cluster proxy component", "virtual cluster", klog.KObj(data)) - return nil -} - -func runCheckVirtualClusterProxy(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("check-VirtualClusterProxy task invoked with an invalid data struct") - } - - checker := apiclient.NewVirtualClusterChecker(data.RemoteClient(), constants.ComponentBeReadyTimeout) - - err := checker.WaitForSomePods(virtualClusterProxyLabels.String(), data.GetNamespace(), 1) - if err != nil { - return fmt.Errorf("timed out waiting for the virtual cluster proxy to become ready, err: %w", err) - } - - klog.V(2).InfoS("[check-VirtualClusterProxy] the virtual cluster proxy is ready", "virtual cluster", klog.KObj(data)) - return nil -} - -func UninstallVirtualClusterProxyTask() workflow.Task { - return workflow.Task{ - Name: "proxy", - Run: runProxy, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: constants.APIServer, - Run: uninstallVirtualClusterProxy, - }, - }, - } -} - -func uninstallVirtualClusterProxy(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual cluster proxy task invoked with an invalid data struct") - } - - secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") - } - config, err := clientcmd.RESTConfigFromKubeConfig(secret.Data[constants.KubeConfig]) - if err != nil { - return err - } - client, err := clientset.NewForConfig(config) - if err != nil { - return err - } - var virtualClient clientset.Interface = client - - err = controlplane.DeleteVirtualClusterProxy( - virtualClient, - ) - if err != nil { - return fmt.Errorf("failed to uninstall virtual cluster proxy component, err: %w", err) - } - - klog.V(2).InfoS("[VirtualClusterProxy] Successfully uninstalled virtual cluster proxy component", "virtual cluster", klog.KObj(data)) - return nil -} diff --git a/pkg/kubenest/tasks/service.go b/pkg/kubenest/tasks/service.go deleted file mode 100644 index 30eaeefaa..000000000 --- a/pkg/kubenest/tasks/service.go +++ /dev/null @@ -1,90 +0,0 @@
-package tasks - -import ( - "errors" - "fmt" - - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" -) - -func NewVirtualClusterServiceTask() workflow.Task { - return workflow.Task{ - Name: "virtual-service", - Run: runService, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "virtual-service", - Run: runVirtualClusterService, - }, - }, - } -} - -func runService(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("service task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[service] Running service task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runVirtualClusterService(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual service task invoked with an invalid data struct") - } - - err := controlplane.EnsureVirtualClusterService( - data.RemoteClient(), - data.GetName(), - data.GetNamespace(), - data.HostPortMap(), - data.KubeNestOpt(), - data.VirtualCluster(), - ) - if err != nil { - return fmt.Errorf("failed to install virtual cluster service, err: %w", err) - } - - klog.V(2).InfoS("[Virtual Cluster Service] Successfully installed virtual cluster service", "virtual cluster", klog.KObj(data)) - return nil -} - -func UninstallVirtualClusterServiceTask() workflow.Task { - return workflow.Task{ - Name: "virtual-service", - Run: runService, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "virtual-service", - Run: uninstallVirtualClusterService, - }, - }, - } -} - -func uninstallVirtualClusterService(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Virtual service task invoked with an invalid data struct") - } - - err := controlplane.DeleteVirtualClusterService( - data.RemoteClient(), - data.GetName(), - data.GetNamespace(), - ) - if err != nil { - return fmt.Errorf("failed to uninstall virtual cluster service, err: %w", err) - } - - klog.V(2).Infof("[Virtual Cluster Service] Successfully uninstalled virtual cluster %s service", data.GetName()) - return nil -} diff --git a/pkg/kubenest/tasks/upload.go b/pkg/kubenest/tasks/upload.go deleted file mode 100644 index 1a82a95ca..000000000 --- a/pkg/kubenest/tasks/upload.go +++ /dev/null @@ -1,321 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/klog/v2" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" - "github.com/kosmos.io/kosmos/pkg/kubenest/util/cert" - "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -var ( - VirtualClusterControllerLabel = labels.Set{constants.VirtualClusterLabelKeyName: constants.VirtualClusterController} -) - -type PortInfo struct { - NodePort int32 - ClusterIPPort int32 -} - -func NewUploadCertsTask() workflow.Task { - return workflow.Task{ - Name: "Upload-Certs", - Run: runUploadCerts, - RunSubTasks: true, - Tasks: []workflow.Task{ - { - Name: "Upload-VirtualClusterCert", - Run: runUploadVirtualClusterCert, - }, - { - Name: "Upload-EtcdCert", - Run: runUploadEtcdCert, - }, - }, - } -} - -func NewUploadKubeconfigTask() workflow.Task { -
return workflow.Task{ - Name: "upload-config", - RunSubTasks: true, - Run: runUploadKubeconfig, - Tasks: []workflow.Task{ - { - Name: "UploadAdminKubeconfig", - Run: runUploadAdminKubeconfig, - }, - }, - } -} - -func runUploadCerts(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("upload-certs task invoked with an invalid data struct") - } - klog.V(4).InfoS("[upload-certs] Running upload-certs task", "virtual cluster", klog.KObj(data)) - - if len(data.CertList()) == 0 { - return errors.New("there are no certs in the store, please reload certs into the store") - } - return nil -} - -func runUploadKubeconfig(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("upload-config task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[upload-config] Running task", "virtual cluster", klog.KObj(data)) - return nil -} - -func runUploadVirtualClusterCert(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("upload-VirtualClusterCert task invoked with an invalid data struct") - } - - certList := data.CertList() - certsData := make(map[string][]byte, len(certList)) - for _, c := range certList { - certsData[c.KeyName()] = c.KeyData() - certsData[c.CertName()] = c.CertData() - } - - err := createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.GetCertName(data.GetName()), - Namespace: data.GetNamespace(), - Labels: VirtualClusterControllerLabel, - }, - Data: certsData, - }) - if err != nil { - return fmt.Errorf("failed to upload virtual cluster cert to secret, err: %w", err) - } - - klog.V(2).InfoS("[upload-VirtualClusterCert] Successfully uploaded virtual cluster certs to secret", "virtual cluster", klog.KObj(data)) - return nil -} - -func runUploadEtcdCert(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("upload-etcdCert task invoked with an invalid data struct") - } - - ca := data.GetCert(constants.EtcdCaCertAndKeyName) - server := data.GetCert(constants.EtcdServerCertAndKeyName) - client := data.GetCert(constants.EtcdClientCertAndKeyName) - - err := createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: data.GetNamespace(), - Name: util.GetEtcdCertName(data.GetName()), - Labels: VirtualClusterControllerLabel, - }, - - Data: map[string][]byte{ - ca.CertName(): ca.CertData(), - ca.KeyName(): ca.KeyData(), - server.CertName(): server.CertData(), - server.KeyName(): server.KeyData(), - client.CertName(): client.CertData(), - client.KeyName(): client.KeyData(), - }, - }) - if err != nil { - return fmt.Errorf("failed to upload etcd certs to secret, err: %w", err) - } - - klog.V(2).InfoS("[upload-etcdCert] Successfully uploaded etcd certs to secret", "virtual cluster", klog.KObj(data)) - return nil -} - -func createOrUpdateSecret(client clientset.Interface, secret *corev1.Secret) error { - _, err := client.CoreV1().Secrets(secret.GetNamespace()).Create(context.TODO(), secret, metav1.CreateOptions{}) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - _, err := client.CoreV1().Secrets(secret.GetNamespace()).Update(context.TODO(), secret, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(5).InfoS("Successfully created or updated secret", "secret", secret.GetName()) - return nil -} - -func runUploadAdminKubeconfig(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("UploadAdminKubeconfig task invoked with an invalid data struct") - } - - var controlplaneIPEndpoint, clusterIPEndpoint string - service, err := data.RemoteClient().CoreV1().Services(data.GetNamespace()).Get(context.TODO(), util.GetAPIServerName(data.GetName()), metav1.GetOptions{}) - if err != nil { - return err - } - portInfo := getPortInfoFromAPIServerService(service) - // controlplane address + nodePort - controlplaneIPEndpoint = fmt.Sprintf("https://%s", utils.GenerateAddrStr(data.ControlplaneAddress(), fmt.Sprintf("%d", portInfo.NodePort))) - controlplaneIPKubeconfig, err := buildKubeConfigFromSpec(data, controlplaneIPEndpoint) - if err != nil { - return err - } - - // clusterIP address + clusterIPPort - clusterIPEndpoint = fmt.Sprintf("https://%s", utils.GenerateAddrStr(service.Spec.ClusterIP, fmt.Sprintf("%d", portInfo.ClusterIPPort))) - clusterIPKubeconfig, err := buildKubeConfigFromSpec(data, clusterIPEndpoint) - if err != nil { - return err - } - - controlplaneIPConfigBytes, err := clientcmd.Write(*controlplaneIPKubeconfig) - if err != nil { - return err - } - - clusterIPConfigBytes, err := clientcmd.Write(*clusterIPKubeconfig) - if err != nil { - return err - } - - err = createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: data.GetNamespace(), - Name: util.GetAdminConfigSecretName(data.GetName()), - Labels: VirtualClusterControllerLabel, - }, - Data: map[string][]byte{"kubeconfig": controlplaneIPConfigBytes}, - }) - if err != nil { - return fmt.Errorf("failed to create secret of kubeconfig, err: %w", err) - } - - err = createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: data.GetNamespace(), - Name: util.GetAdminConfigClusterIPSecretName(data.GetName()), - Labels: VirtualClusterControllerLabel, - }, - Data: map[string][]byte{"kubeconfig": clusterIPConfigBytes}, - }) - if err != nil { - return fmt.Errorf("failed to create secret of kubeconfig-clusterip, err: %w", err) - } - - klog.V(2).InfoS("[UploadAdminKubeconfig] Successfully created secrets of virtual cluster apiserver kubeconfig", "virtual cluster", klog.KObj(data)) - return nil -} - -func getPortInfoFromAPIServerService(service *corev1.Service) PortInfo { - var portInfo PortInfo - if service.Spec.Type == corev1.ServiceTypeNodePort { - for _, port := range service.Spec.Ports { - if port.Name != constants.APIServerSVCPortName { - continue - } - portInfo.NodePort = port.NodePort - portInfo.ClusterIPPort = port.Port - } - } - - return portInfo -} - -func buildKubeConfigFromSpec(data InitData, serverURL string) (*clientcmdapi.Config, error) { - ca := data.GetCert(constants.CaCertAndKeyName) - if ca == nil { - return nil, errors.New("unable to build virtual cluster admin kubeconfig, CA cert is empty") - } - - cc := cert.VirtualClusterCertClient() - - if err := mutateCertConfig(data, cc); err != nil { - return nil, fmt.Errorf("error when mutating cert altNames for %s, err: %w", cc.Name, err) - } - client, err := cert.CreateCertAndKeyFilesWithCA(cc, ca.CertData(), ca.KeyData()) - if err != nil { - return nil, fmt.Errorf("failed to generate virtual cluster apiserver client certificate for kubeconfig, err: %w", err) - } - - return util.CreateWithCerts( - serverURL, - constants.ClusterName, - constants.UserName, - ca.CertData(), - client.KeyData(), - client.CertData(), - ), nil -} - -func UninstallCertsAndKubeconfigTask() workflow.Task { - return workflow.Task{ - Name: "Uninstall-Certs", - Run: runUninstallCerts, - RunSubTasks: true, - Tasks: []workflow.Task{ -
{ - Name: "Uninstall-Certs", - Run: deleteSecrets, - }, - }, - } -} - -func runUninstallCerts(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("Uninstall-Certs task invoked with an invalid data struct") - } - - klog.V(4).InfoS("[uninstall-Certs] Running task", "virtual cluster", klog.KObj(data)) - return nil -} - -func deleteSecrets(r workflow.RunData) error { - data, ok := r.(InitData) - if !ok { - return errors.New("upload-VirtualClusterCert task invoked with an invalid data struct") - } - - secrets := []string{ - util.GetCertName(data.GetName()), - util.GetEtcdCertName(data.GetName()), - util.GetAdminConfigSecretName(data.GetName()), - util.GetAdminConfigClusterIPSecretName(data.GetName()), - } - for _, secret := range secrets { - err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Delete(context.TODO(), secret, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("Secret %s/%s not found, skip delete", secret, data.GetNamespace()) - continue - } - return errors.Wrapf(err, "Failed to delete secret %s/%s", secret, data.GetNamespace()) - } - } - klog.V(2).Infof("Successfully uninstalled virtual cluster %s secrets", data.GetName()) - return nil -} diff --git a/pkg/kubenest/util/address.go b/pkg/kubenest/util/address.go deleted file mode 100644 index f22defbd2..000000000 --- a/pkg/kubenest/util/address.go +++ /dev/null @@ -1,83 +0,0 @@ -package util - -import ( - "context" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - clientset "k8s.io/client-go/kubernetes" - netutils "k8s.io/utils/net" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -func GetAPIServiceIP(clientset clientset.Interface) (string, error) { - nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil || len(nodes.Items) == 0 { - return "", fmt.Errorf("there are no nodes in cluster, err: %w", err) - } - - var ( - masterLabel = labels.Set{utils.LabelNodeRoleOldControlPlane: ""} - controlplaneLabel = labels.Set{utils.LabelNodeRoleControlPlane: ""} - ) - // first, select the master node as the IP of APIServer. if there is - // no master nodes, randomly select a worker node. 
- for _, node := range nodes.Items { - ls := labels.Set(node.GetLabels()) - - if masterLabel.AsSelector().Matches(ls) || controlplaneLabel.AsSelector().Matches(ls) { - if ip := netutils.ParseIPSloppy(node.Status.Addresses[0].Address); ip != nil { - return ip.String(), nil - } - } - } - return nodes.Items[0].Status.Addresses[0].Address, nil -} - -func GetAPIServiceClusterIP(namespace string, client clientset.Interface) (string, error) { - serviceLists, err := client.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return "", err - } - if serviceLists != nil { - for _, service := range serviceLists.Items { - if service.Spec.Type == constants.ServiceType { - return service.Spec.ClusterIP, nil - } - } - } - return "", nil -} - -func GetServiceClusterIP(namespace string, client clientset.Interface) ([]string, error) { - serviceLists, err := client.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, err - } - var clusterIps []string - if serviceLists != nil { - for _, service := range serviceLists.Items { - if service.Spec.ClusterIP != "" { - clusterIps = append(clusterIps, service.Spec.ClusterIP) - } - } - } - return clusterIps, nil -} - -func GetEtcdServiceClusterIP(namespace string, serviceName string, client clientset.Interface) (string, error) { - service, err := client.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) - if err != nil { - return "", err - } - - // Check that the service is of the expected type and has a valid ClusterIP - if service.Spec.Type == constants.EtcdServiceType && service.Spec.ClusterIP != "" { - return service.Spec.ClusterIP, nil - } - - return "", fmt.Errorf("Service %s not found or does not have a valid ClusterIP for Etcd", serviceName) -} diff --git a/pkg/kubenest/util/address_test.go b/pkg/kubenest/util/address_test.go deleted file mode 100644 index c08b10180..000000000 --- a/pkg/kubenest/util/address_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package util - -import ( - "testing" - - netutils "k8s.io/utils/net" -) - -func TestGetAPIServiceIP(t *testing.T) { - client, err := prepare() - if err != nil { - t.Logf("failed to prepare client: %v", err) - return - } - - str, err := GetAPIServiceIP(client) - if err != nil { - t.Logf("failed to get api service ip: %v", err) - } - if len(str) == 0 { - t.Logf("api service ip is empty") - } else { - t.Logf("api service ip is %s", str) - } -} - -func TestParseIP(t *testing.T) { - tests := []struct { - name string - input string - want string - }{ - {"ipv4", "10.237.6.0", "10.237.6.0"}, - {"ipv6", "2409:8c2f:3800:0011::0a18:0000", "2409:8c2f:3800:11::a18:0"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ip := netutils.ParseIPSloppy(tt.input) - if ip.String() != tt.want { - t.Fatalf("%s: input %s parsed to %s, want %s", tt.name, tt.input, ip.String(), tt.want) - } - }) - } -} diff --git a/pkg/kubenest/util/api-client/check.go b/pkg/kubenest/util/api-client/check.go deleted file mode 100644 index 54d9ebe4f..000000000 --- a/pkg/kubenest/util/api-client/check.go +++ /dev/null @@ -1,96 +0,0 @@ -package util - -import ( - "context" - "net/http" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -const DefaultRetryCount = 3 - -type Checker interface { - WaitForAPI() error - WaitForSomePods(label, namespace string, podNum int32) error -} - -type VirtualClusterChecker struct { - client clientset.Interface - timeout time.Duration -} - -func NewVirtualClusterChecker(client clientset.Interface, timeout time.Duration) Checker { - return &VirtualClusterChecker{ - client: client, - timeout: timeout, - } -} - -func (v *VirtualClusterChecker) WaitForSomePods(label, namespace string, podNum int32) error { - return wait.PollImmediate(constants.APIServerCallRetryInterval, v.timeout, func() (bool, error) { - listOpts := metav1.ListOptions{LabelSelector: label} - pods, err := v.client.CoreV1().Pods(namespace).List(context.TODO(), listOpts) - if err != nil { - return false, nil - } - - if len(pods.Items) == 0 { - return false, nil - } - - var expected int32 - for _, pod := range pods.Items { - if isPodRunning(pod) { - expected++ - } - } - return expected >= podNum, nil - }) -} -func (v *VirtualClusterChecker) WaitForAPI() error { - return wait.PollImmediate(constants.APIServerCallRetryInterval, v.timeout, func() (bool, error) { - healthStatus := 0 - v.client.Discovery().RESTClient().Get().AbsPath("/healthz").Do(context.TODO()).StatusCode(&healthStatus) - if healthStatus != http.StatusOK { - return false, nil - } - - return true, nil - }) -} - -func TryRunCommand(f func() error, failureThreshold int) error { - backoff := wait.Backoff{ - Duration: 5 * time.Second, - Factor: 2, // double the timeout for every failure - Steps: failureThreshold, - } - return wait.ExponentialBackoff(backoff, func() (bool, error) { - err := f() - if err != nil { - // Retry until the timeout - return false, nil - } - // The last f() call was a success, return cleanly - return true, nil - }) -} - -func isPodRunning(pod corev1.Pod) bool { - if pod.Status.Phase != corev1.PodRunning || pod.DeletionTimestamp != nil { - return false - } - - for _, condition := range pod.Status.Conditions { - if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { - return true - } - } - return false -} diff --git a/pkg/kubenest/util/api-client/check_test.go b/pkg/kubenest/util/api-client/check_test.go deleted file mode 100644 index d7528b2ab..000000000 --- a/pkg/kubenest/util/api-client/check_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package util - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" // use the kubernetes fake clientset -) - -// TestVirtualClusterChecker_WaitForSomePods tests the WaitForSomePods method -func TestVirtualClusterChecker_WaitForSomePods(t *testing.T) { - // create a fake client pre-populated with some Pods - client := fake.NewSimpleClientset(&corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-1", - Namespace: "default", - Labels: map[string]string{"app": "test"}, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - {Type: corev1.PodReady, Status: corev1.ConditionTrue}, - }, - }, - }, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-2", - Namespace: "default", - Labels: map[string]string{"app": "test"}, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - {Type: corev1.PodReady, Status: corev1.ConditionTrue}, - }, - }, - }) - - checker := NewVirtualClusterChecker(client, 10*time.Second) - err := checker.WaitForSomePods("app=test", "default", 2) - assert.NoError(t, err) -} - -// TestVirtualClusterChecker_WaitForSomePods_Error tests the failure path of WaitForSomePods -func TestVirtualClusterChecker_WaitForSomePods_Error(t *testing.T) { - // create a fake client pre-populated with some Pods - client := fake.NewSimpleClientset(&corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-1", - Namespace: "default", - Labels: map[string]string{"app": "test"}, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - {Type: corev1.PodReady, Status: corev1.ConditionFalse}, - }, - }, - }) - - checker := NewVirtualClusterChecker(client, 10*time.Second) - err := checker.WaitForSomePods("app=test1", "default", 2) - assert.Error(t, err) -} - -// TestTryRunCommand tests the TryRunCommand function -func TestTryRunCommand(t *testing.T) { - count := 0 - f := func() error { - count++ - if count < 3 { - return fmt.Errorf("error") - } - return nil - } - - err := TryRunCommand(f, 3) - assert.NoError(t, err) -} - -func TestTryRunCommand_Error(t *testing.T) { - f := func() error { - return fmt.Errorf("error") - } - - err := TryRunCommand(f, 1) - assert.Error(t, err) -} diff --git a/pkg/kubenest/util/cert/certs.go b/pkg/kubenest/util/cert/certs.go deleted file mode 100644 index c74a77a3c..000000000 --- a/pkg/kubenest/util/cert/certs.go +++ /dev/null @@ -1,557 +0,0 @@ -package cert - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - cryptorand "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "math" - "math/big" - "net" - "time" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - certutil "k8s.io/client-go/util/cert" - "k8s.io/client-go/util/keyutil" - netutils "k8s.io/utils/net" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -// nolint:revive -type CertConfig struct { - Name string - CAName string - NotAfter *time.Time - PublicKeyAlgorithm x509.PublicKeyAlgorithm - Config certutil.Config - AltNamesMutatorFunc altNamesMutatorFunc -} - -type altNamesMutatorFunc func(*AltNamesMutatorConfig, *CertConfig) error - -type AltNamesMutatorConfig struct { - Name string - Namespace string - ControlplaneAddr string - ClusterIPs []string - ExternalIP string - ExternalIPs []string - VipMap map[string]string -} - -func (config *CertConfig) defaultPublicKeyAlgorithm() { - if config.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm { - config.PublicKeyAlgorithm = x509.RSA - } -} - -func (config *CertConfig) defaultNotAfter() { - if config.NotAfter == nil { - notAfter := time.Now().Add(constants.CertificateValidity).UTC() - config.NotAfter = &notAfter - } -} - -func GetDefaultCertList() []*CertConfig { - return []*CertConfig{ - // virtual cluster cert config. - VirtualClusterCertRootCA(), - VirtualClusterCertAdmin(), - VirtualClusterCertApiserver(), - // front proxy cert config. - VirtualClusterCertFrontProxyCA(), - VirtualClusterFrontProxyClient(), - // ETCD cert config. - VirtualClusterCertEtcdCA(), - VirtualClusterCertEtcdServer(), - VirtualClusterCertEtcdClient(), - // proxy server cert config.
- VirtualClusterProxyServer(), - } -} - -func VirtualClusterProxyServer() *CertConfig { - return &CertConfig{ - Name: constants.ProxyServerCertAndKeyName, - CAName: constants.CaCertAndKeyName, - Config: certutil.Config{ - CommonName: "virtualCluster-proxy-server", - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - }, - AltNamesMutatorFunc: makeAltNamesMutator(proxyServerAltNamesMutator), - } -} - -func VirtualClusterCertEtcdCA() *CertConfig { - return &CertConfig{ - Name: constants.EtcdCaCertAndKeyName, - Config: certutil.Config{ - CommonName: "virtualcluster-etcd-ca", - }, - } -} - -func VirtualClusterCertEtcdServer() *CertConfig { - return &CertConfig{ - Name: constants.EtcdServerCertAndKeyName, - CAName: constants.EtcdCaCertAndKeyName, - Config: certutil.Config{ - CommonName: "virtualCluster-etcd-server", - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - }, - AltNamesMutatorFunc: makeAltNamesMutator(etcdServerAltNamesMutator), - } -} - -func VirtualClusterCertEtcdClient() *CertConfig { - return &CertConfig{ - Name: constants.EtcdClientCertAndKeyName, - CAName: constants.EtcdCaCertAndKeyName, - Config: certutil.Config{ - CommonName: "virtualCluster-etcd-client", - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - }, - } -} - -func VirtualClusterCertFrontProxyCA() *CertConfig { - return &CertConfig{ - Name: constants.FrontProxyCaCertAndKeyName, - Config: certutil.Config{ - CommonName: "front-proxy-ca", - }, - } -} - -func VirtualClusterFrontProxyClient() *CertConfig { - return &CertConfig{ - Name: constants.FrontProxyClientCertAndKeyName, - CAName: constants.FrontProxyCaCertAndKeyName, - Config: certutil.Config{ - CommonName: "front-proxy-client", - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }, - } -} - -func etcdServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, error) { - etcdClientServiceDNS := fmt.Sprintf("%s.%s.svc.cluster.local", util.GetEtcdClientServerName(cfg.Name), cfg.Namespace) - etcdPeerServiceDNS := fmt.Sprintf("*.%s.%s.svc.cluster.local", util.GetEtcdServerName(cfg.Name), cfg.Namespace) - - altNames := &certutil.AltNames{ - DNSNames: []string{"localhost", etcdClientServiceDNS, etcdPeerServiceDNS}, - IPs: []net.IP{net.ParseIP("::1"), net.IPv4(127, 0, 0, 1)}, - } - - if len(cfg.ClusterIPs) > 0 { - for _, clusterIP := range cfg.ClusterIPs { - appendSANsToAltNames(altNames, []string{clusterIP}) - } - } - return altNames, nil -} - -func VirtualClusterCertApiserver() *CertConfig { - return &CertConfig{ - Name: constants.ApiserverCertAndKeyName, - CAName: constants.CaCertAndKeyName, - Config: certutil.Config{ - CommonName: "virtualCluster-apiserver", - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - }, - AltNamesMutatorFunc: makeAltNamesMutator(apiServerAltNamesMutator), - } -} - -func VirtualClusterCertRootCA() *CertConfig { - return &CertConfig{ - Name: constants.CaCertAndKeyName, - Config: certutil.Config{ - CommonName: "virtualCluster", - }, - } -} - -func VirtualClusterCertAdmin() *CertConfig { - return &CertConfig{ - Name: constants.VirtualClusterCertAndKeyName, - CAName: constants.CaCertAndKeyName, - Config: certutil.Config{ - CommonName: "system:admin", - Organization: []string{"system:masters"}, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - }, - AltNamesMutatorFunc: makeAltNamesMutator(apiServerAltNamesMutator), - } -} - -func makeAltNamesMutator(f func(cfg 
*AltNamesMutatorConfig) (*certutil.AltNames, error)) altNamesMutatorFunc { - return func(cfg *AltNamesMutatorConfig, cc *CertConfig) error { - altNames, err := f(cfg) - if err != nil { - return err - } - - cc.Config.AltNames = *altNames - return nil - } -} - -func proxyServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, error) { - firstIPs, err := util.GetFirstIP(constants.APIServerServiceSubnet) - if err != nil { - return nil, err - } - - altNames := &certutil.AltNames{ - DNSNames: []string{ - "localhost", - "kubernetes", - "kubernetes.default", - "kubernetes.default.svc", - }, - IPs: append([]net.IP{ - net.ParseIP("::1"), - net.IPv4(127, 0, 0, 1), - }, firstIPs...), - } - - if cfg.Namespace != constants.VirtualClusterSystemNamespace { - appendSANsToAltNames(altNames, []string{fmt.Sprintf("*.%s.svc.cluster.local", cfg.Namespace), - fmt.Sprintf("*.%s.svc", cfg.Namespace)}) - } - if len(cfg.ControlplaneAddr) > 0 { - appendSANsToAltNames(altNames, []string{cfg.ControlplaneAddr}) - } - if len(cfg.ExternalIP) > 0 { - appendSANsToAltNames(altNames, []string{cfg.ExternalIP}) - } - - if len(cfg.ExternalIPs) > 0 { - for _, externalIP := range cfg.ExternalIPs { - appendSANsToAltNames(altNames, []string{externalIP}) - } - } - - if len(cfg.ClusterIPs) > 0 { - for _, clusterIP := range cfg.ClusterIPs { - appendSANsToAltNames(altNames, []string{clusterIP}) - } - } - return altNames, nil -} - -func apiServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, error) { - firstIPs, err := util.GetFirstIP(constants.APIServerServiceSubnet) - if err != nil { - return nil, err - } - - altNames := &certutil.AltNames{ - DNSNames: []string{ - "localhost", - "kubernetes", - "kubernetes.default", - "kubernetes.default.svc", - "konnectivity-server.kube-system.svc.cluster.local", - // fmt.Sprintf("*.%s.svc.cluster.local", constants.VirtualClusterSystemNamespace), - fmt.Sprintf("*.%s.svc", constants.VirtualClusterSystemNamespace), - }, - // TODO: consider the cases where a node belongs to the current cluster and where it does not - IPs: append([]net.IP{ - net.ParseIP("::1"), - net.IPv4(127, 0, 0, 1), - }, firstIPs...), - } - - if cfg.Namespace != constants.VirtualClusterSystemNamespace { - appendSANsToAltNames(altNames, []string{fmt.Sprintf("*.%s.svc.cluster.local", cfg.Namespace), - fmt.Sprintf("*.%s.svc", cfg.Namespace)}) - } - if len(cfg.ControlplaneAddr) > 0 { - appendSANsToAltNames(altNames, []string{cfg.ControlplaneAddr}) - } - if len(cfg.ExternalIP) > 0 { - appendSANsToAltNames(altNames, []string{cfg.ExternalIP}) - } - - if len(cfg.ExternalIPs) > 0 { - for _, externalIP := range cfg.ExternalIPs { - appendSANsToAltNames(altNames, []string{externalIP}) - } - } - - if len(cfg.VipMap) > 0 { - for _, vip := range cfg.VipMap { - appendSANsToAltNames(altNames, []string{vip}) - } - } - if len(cfg.ClusterIPs) > 0 { - for _, clusterIP := range cfg.ClusterIPs { - appendSANsToAltNames(altNames, []string{clusterIP}) - } - } - return altNames, nil -} - -func appendSANsToAltNames(altNames *certutil.AltNames, SANs []string) { - for _, altname := range SANs { - if ip := netutils.ParseIPSloppy(altname); ip != nil { - altNames.IPs = append(altNames.IPs, ip) - } else if len(validation.IsDNS1123Subdomain(altname)) == 0 { - altNames.DNSNames = append(altNames.DNSNames, altname) - } else if len(validation.IsWildcardDNS1123Subdomain(altname)) == 0 { - altNames.DNSNames = append(altNames.DNSNames, altname) - } - } -} - -type VirtualClusterCert struct { - pairName string - caName string - cert []byte - key []byte -} - -// CertData returns certificate cert data.
-func (cert *VirtualClusterCert) CertData() []byte { - return cert.cert -} - -// KeyData returns certificate key data. -func (cert *VirtualClusterCert) KeyData() []byte { - return cert.key -} - -// CertName returns cert file name. its default suffix is ".crt". -func (cert *VirtualClusterCert) CertName() string { - pair := cert.pairName - if len(pair) == 0 { - pair = "cert" - } - return pair + constants.CertExtension -} - -// KeyName returns cert key file name. its default suffix is ".key". -func (cert *VirtualClusterCert) KeyName() string { - pair := cert.pairName - if len(pair) == 0 { - pair = "cert" - } - return pair + constants.KeyExtension -} - -func NewCertificateAuthority(cc *CertConfig) (*VirtualClusterCert, error) { - cc.defaultPublicKeyAlgorithm() - - key, err := GeneratePrivateKey(cc.PublicKeyAlgorithm) - if err != nil { - return nil, fmt.Errorf("unable to create private key while generating CA certificate, err: %w", err) - } - - cert, err := NewSelfSignedCACert(cc.Config, key) - if err != nil { - return nil, fmt.Errorf("unable to create self-signed CA certificate, err: %w", err) - } - - encoded, err := keyutil.MarshalPrivateKeyToPEM(key) - if err != nil { - return nil, fmt.Errorf("unable to marshal private key to PEM, err: %w", err) - } - - return &VirtualClusterCert{ - pairName: cc.Name, - caName: cc.CAName, - cert: EncodeCertPEM(cert), - key: encoded, - }, nil -} - -// NewSelfSignedCACert creates a CA certificate -func NewSelfSignedCACert(cfg certutil.Config, key crypto.Signer) (*x509.Certificate, error) { - now := time.Now() - tmpl := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(0), - Subject: pkix.Name{ - CommonName: cfg.CommonName, - Organization: cfg.Organization, - }, - DNSNames: []string{cfg.CommonName}, - NotBefore: now.UTC(), - NotAfter: now.Add(constants.CertificateValidity).UTC(), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - BasicConstraintsValid: true, - IsCA: true, - } - - certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) - if err != nil { - return nil, err - } - return x509.ParseCertificate(certDERBytes) -} - -func CreateCertAndKeyFilesWithCA(cc *CertConfig, caCertData, caKeyData []byte) (*VirtualClusterCert, error) { - if len(cc.Config.Usages) == 0 { - return nil, fmt.Errorf("must specify at least one ExtKeyUsage") - } - - cc.defaultNotAfter() - cc.defaultPublicKeyAlgorithm() - - key, err := GeneratePrivateKey(cc.PublicKeyAlgorithm) - if err != nil { - return nil, fmt.Errorf("unable to create private key, err: %w", err) - } - - caCerts, err := certutil.ParseCertsPEM(caCertData) - if err != nil { - return nil, err - } - - caKey, err := ParsePrivateKeyPEM(caKeyData) - if err != nil { - return nil, err - } - - // Safely pick the first one because the sender's certificate must come first in the list. 
- // For details, see: https://www.rfc-editor.org/rfc/rfc4346#section-7.4.2 - caCert := caCerts[0] - - cert, err := NewSignedCert(cc, key, caCert, caKey, false) - if err != nil { - return nil, err - } - - encoded, err := keyutil.MarshalPrivateKeyToPEM(key) - if err != nil { - return nil, fmt.Errorf("unable to marshal private key to PEM, err: %w", err) - } - - return &VirtualClusterCert{ - pairName: cc.Name, - caName: cc.CAName, - cert: EncodeCertPEM(cert), - key: encoded, - }, nil -} - -func EncodeCertPEM(cert *x509.Certificate) []byte { - block := pem.Block{ - Type: constants.CertificateBlockType, - Bytes: cert.Raw, - } - return pem.EncodeToMemory(&block) -} - -func GeneratePrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) { - if keyType == x509.ECDSA { - return ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) - } - - return rsa.GenerateKey(cryptorand.Reader, constants.RsaKeySize) -} - -func ParsePrivateKeyPEM(keyData []byte) (crypto.Signer, error) { - caPrivateKey, err := keyutil.ParsePrivateKeyPEM(keyData) - if err != nil { - return nil, err - } - - // Allow RSA and ECDSA formats only - var key crypto.Signer - switch k := caPrivateKey.(type) { - case *rsa.PrivateKey: - key = k - case *ecdsa.PrivateKey: - key = k - default: - return nil, errors.New("the private key is neither in RSA nor ECDSA format") - } - - return key, nil -} - -func NewSignedCert(cc *CertConfig, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer, isCA bool) (*x509.Certificate, error) { - serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64)) - if err != nil { - return nil, err - } - if len(cc.Config.CommonName) == 0 { - return nil, fmt.Errorf("must specify a CommonName") - } - - keyUsage := x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature - if isCA { - keyUsage |= x509.KeyUsageCertSign - } - - RemoveDuplicateAltNames(&cc.Config.AltNames) - notAfter := time.Now().Add(constants.CertificateValidity).UTC() - if cc.NotAfter != nil { - notAfter = *cc.NotAfter - } - - certTmpl := x509.Certificate{ - Subject: pkix.Name{ - CommonName: cc.Config.CommonName, - Organization: cc.Config.Organization, - }, - DNSNames: cc.Config.AltNames.DNSNames, - IPAddresses: cc.Config.AltNames.IPs, - SerialNumber: serial, - NotBefore: caCert.NotBefore, - NotAfter: notAfter, - KeyUsage: keyUsage, - ExtKeyUsage: cc.Config.Usages, - BasicConstraintsValid: true, - IsCA: isCA, - } - certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) - if err != nil { - return nil, err - } - return x509.ParseCertificate(certDERBytes) -} - -func RemoveDuplicateAltNames(altNames *certutil.AltNames) { - if altNames == nil { - return - } - - if altNames.DNSNames != nil { - altNames.DNSNames = sets.NewString(altNames.DNSNames...).List() - } - - ipsKeys := make(map[string]struct{}) - var ips []net.IP - for _, one := range altNames.IPs { - if _, ok := ipsKeys[one.String()]; !ok { - ipsKeys[one.String()] = struct{}{} - ips = append(ips, one) - } - } - altNames.IPs = ips -} - -func VirtualClusterCertClient() *CertConfig { - return &CertConfig{ - Name: "virtualCluster-client", - CAName: constants.CaCertAndKeyName, - Config: certutil.Config{ - CommonName: "system:admin", - Organization: []string{"system:masters"}, - Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }, - AltNamesMutatorFunc: makeAltNamesMutator(apiServerAltNamesMutator), - } -} diff --git a/pkg/kubenest/util/cert/certs_test.go b/pkg/kubenest/util/cert/certs_test.go deleted 
file mode 100644 index 69ba59a12..000000000 --- a/pkg/kubenest/util/cert/certs_test.go +++ /dev/null @@ -1,272 +0,0 @@ -package cert - -import ( - "crypto/x509" - "testing" - "time" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -func TestCertConfig_defaultPublicKeyAlgorithm(t *testing.T) { - // Scenario 1: PublicKeyAlgorithm is unset and should default to x509.RSA - config := &CertConfig{ - PublicKeyAlgorithm: x509.UnknownPublicKeyAlgorithm, - } - config.defaultPublicKeyAlgorithm() - if config.PublicKeyAlgorithm != x509.RSA { - t.Errorf("expected PublicKeyAlgorithm to be x509.RSA, got %v", config.PublicKeyAlgorithm) - } - - // Scenario 2: PublicKeyAlgorithm is already set and should not change - config = &CertConfig{ - PublicKeyAlgorithm: x509.ECDSA, - } - config.defaultPublicKeyAlgorithm() - if config.PublicKeyAlgorithm != x509.ECDSA { - t.Errorf("expected PublicKeyAlgorithm to remain x509.ECDSA, got %v", config.PublicKeyAlgorithm) - } -} - -func TestCertConfig_defaultNotAfter(t *testing.T) { - // Scenario 1: NotAfter is unset and should default to the current time plus the validity constant - config := &CertConfig{ - NotAfter: nil, - } - config.defaultNotAfter() - expectedNotAfter := time.Now().Add(constants.CertificateValidity) - if config.NotAfter == nil || config.NotAfter.Sub(expectedNotAfter) > time.Second { - t.Errorf("expected NotAfter to be %v, got %v", expectedNotAfter, config.NotAfter) - } - - // Scenario 2: NotAfter is already set and should not change - expectedTime := time.Now().Add(24 * time.Hour) - config = &CertConfig{ - NotAfter: &expectedTime, - } - config.defaultNotAfter() - if config.NotAfter != &expectedTime { - t.Errorf("expected NotAfter to remain %v, got %v", expectedTime, config.NotAfter) - } -} - -func TestGetDefaultCertList(t *testing.T) { - certList := GetDefaultCertList() - - // confirm the returned CertConfig list contains the expected number of configs - expectedCertCount := 9 - if len(certList) != expectedCertCount { - t.Fatalf("expected %d certs, but got %d", expectedCertCount, len(certList)) - } - - // verify each CertConfig's Name matches expectations - expectedNames := []string{ - constants.CaCertAndKeyName, // CA cert - constants.VirtualClusterCertAndKeyName, // Admin cert - constants.ApiserverCertAndKeyName, // Apiserver cert - constants.FrontProxyCaCertAndKeyName, // Front proxy CA cert - constants.FrontProxyClientCertAndKeyName, // Front proxy client cert - constants.EtcdCaCertAndKeyName, // ETCD CA cert - constants.EtcdServerCertAndKeyName, // ETCD server cert - constants.EtcdClientCertAndKeyName, // ETCD client cert - constants.ProxyServerCertAndKeyName, // Proxy server cert - } - - for i, certConfig := range certList { - if certConfig.Name != expectedNames[i] { - t.Errorf("expected cert name %s, but got %s", expectedNames[i], certConfig.Name) - } - } -} - -func TestVirtualClusterProxyServer(t *testing.T) { - certConfig := VirtualClusterProxyServer() - - // verify the certConfig fields - if certConfig.Name != constants.ProxyServerCertAndKeyName { - t.Errorf("expected Name to be %s, but got %s", constants.ProxyServerCertAndKeyName, certConfig.Name) - } - if certConfig.CAName != constants.CaCertAndKeyName { - t.Errorf("expected CAName to be %s, but got %s", constants.CaCertAndKeyName, certConfig.CAName) - } - if certConfig.Config.CommonName != "virtualCluster-proxy-server" { - t.Errorf("expected CommonName to be virtualCluster-proxy-server, but got %s", certConfig.Config.CommonName) - } - expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} - if len(certConfig.Config.Usages) != len(expectedUsages) { - t.Errorf("expected %d usages, but got %d", len(expectedUsages), len(certConfig.Config.Usages)) - } - for i, usage := range certConfig.Config.Usages { - if usage != expectedUsages[i] { - t.Errorf("expected usage %v, but got %v", expectedUsages[i], usage) - } - } -} - -func TestVirtualClusterCertEtcdCA(t *testing.T) { - certConfig := VirtualClusterCertEtcdCA() - - // verify the certConfig fields - if certConfig.Name != constants.EtcdCaCertAndKeyName { - t.Errorf("expected Name to be %s, but got %s", constants.EtcdCaCertAndKeyName, certConfig.Name) - } - if certConfig.Config.CommonName != "virtualcluster-etcd-ca" { - t.Errorf("expected CommonName to be virtualcluster-etcd-ca, but got %s", certConfig.Config.CommonName) - } -} - -// Test VirtualClusterCertEtcdServer -func TestVirtualClusterCertEtcdServer(t *testing.T) { - certConfig := VirtualClusterCertEtcdServer() - - // verify the certConfig fields - if certConfig.Name != constants.EtcdServerCertAndKeyName { - t.Errorf("expected Name to be %s, but got %s", constants.EtcdServerCertAndKeyName, certConfig.Name) - } - if certConfig.CAName != constants.EtcdCaCertAndKeyName { - t.Errorf("expected CAName to be %s, but got %s", constants.EtcdCaCertAndKeyName, certConfig.CAName) - } - if certConfig.Config.CommonName != "virtualCluster-etcd-server" { - t.Errorf("expected CommonName to be virtualCluster-etcd-server, but got %s", certConfig.Config.CommonName) - } - expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} - if len(certConfig.Config.Usages) != len(expectedUsages) { - t.Errorf("expected %d usages, but got %d", len(expectedUsages), len(certConfig.Config.Usages)) - } - for i, usage := range certConfig.Config.Usages { - if usage != expectedUsages[i] { - t.Errorf("expected usage %v, but got %v", expectedUsages[i], usage) - } - } -} - -// Test VirtualClusterCertEtcdClient -func TestVirtualClusterCertEtcdClient(t *testing.T) { - certConfig := VirtualClusterCertEtcdClient() - - // verify the certConfig fields - if certConfig.Name != constants.EtcdClientCertAndKeyName { - t.Errorf("expected Name to be %s, but got %s", constants.EtcdClientCertAndKeyName, certConfig.Name) - } - if certConfig.CAName != constants.EtcdCaCertAndKeyName { - t.Errorf("expected CAName to be %s, but got %s", constants.EtcdCaCertAndKeyName, certConfig.CAName) - } - if certConfig.Config.CommonName != "virtualCluster-etcd-client" { - t t.Errorf("expected CommonName to be virtualCluster-etcd-client, but got %s", certConfig.Config.CommonName) - } - expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} - if len(certConfig.Config.Usages) != len(expectedUsages) { - t.Errorf("expected %d usages, but got %d", len(expectedUsages), len(certConfig.Config.Usages)) - } -} - -// Test VirtualClusterCertFrontProxyCA -func TestVirtualClusterCertFrontProxyCA(t *testing.T) { - certConfig := VirtualClusterCertFrontProxyCA() - - // verify the certConfig fields - if certConfig.Name != constants.FrontProxyCaCertAndKeyName { - t.Errorf("expected Name to be %s, but got %s", constants.FrontProxyCaCertAndKeyName, certConfig.Name) - } - if certConfig.Config.CommonName != "front-proxy-ca" { - t.Errorf("expected CommonName to be front-proxy-ca, but got %s", certConfig.Config.CommonName) - } -} - -// Test VirtualClusterFrontProxyClient -func TestVirtualClusterFrontProxyClient(t *testing.T) { - certConfig := VirtualClusterFrontProxyClient() - - // verify the certConfig fields - if certConfig.Name != constants.FrontProxyClientCertAndKeyName { - t.Errorf("expected Name to be %s, but got %s", constants.FrontProxyClientCertAndKeyName, certConfig.Name) - } - if certConfig.CAName != constants.FrontProxyCaCertAndKeyName { - t.Errorf("expected CAName to be %s, but got %s", constants.FrontProxyCaCertAndKeyName, certConfig.CAName) - } - if certConfig.Config.CommonName != "front-proxy-client" { - t.Errorf("expected CommonName to be front-proxy-client, but got %s", certConfig.Config.CommonName) - } - expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} - if len(certConfig.Config.Usages) != len(expectedUsages) { - t.Errorf("expected %d usages, but got %d", len(expectedUsages), len(certConfig.Config.Usages)) - } - for i, usage := range certConfig.Config.Usages { - if usage != expectedUsages[i] { - t.Errorf("expected usage %v, but got %v", expectedUsages[i], usage) - } - } -} - -// Test VirtualClusterCertApiserver -func TestVirtualClusterCertApiserver(t *testing.T) { - certConfig := VirtualClusterCertApiserver() - - // verify the certConfig fields - if certConfig.Name != constants.ApiserverCertAndKeyName { - t.Errorf("expected Name to be %s, but got %s", constants.ApiserverCertAndKeyName, certConfig.Name) - } - if certConfig.CAName != constants.CaCertAndKeyName { - t.Errorf("expected CAName to be %s, but got %s", constants.CaCertAndKeyName, certConfig.CAName) - } - if certConfig.Config.CommonName != "virtualCluster-apiserver" { - t.Errorf("expected CommonName to be virtualCluster-apiserver, but got %s", certConfig.Config.CommonName) - } - expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} - if len(certConfig.Config.Usages) != len(expectedUsages) { - t.Errorf("expected %d usages, but got %d", len(expectedUsages), len(certConfig.Config.Usages)) - } - for i, usage := range certConfig.Config.Usages { - if usage != expectedUsages[i] { - t.Errorf("expected usage %v, but got %v", expectedUsages[i], usage) - } - } -} - -// Test etcdServerAltNamesMutator -//func TestEtcdServerAltNamesMutator(t *testing.T) { -// cfg := &AltNamesMutatorConfig{ -// Name: "test", -// Namespace: "default", -// ClusterIPs: []string{ -// "10.96.0.1", -// "10.96.0.2", -// }, -// } -// -// altNames, err := etcdServerAltNamesMutator(cfg) -// if err != nil { -// t.Fatalf("unexpected error: %v", err) -// } -// -// // verify the DNS names -// expectedDNSNames := []string{ -// "localhost", -// "test.default.svc.cluster.local", -// "*.test.default.svc.cluster.local", -// } -// if len(altNames.DNSNames) != len(expectedDNSNames) { -// t.Fatalf("expected %d DNS names, but got %d", len(expectedDNSNames), len(altNames.DNSNames)) -// } -// for i, dns := range altNames.DNSNames { -// if dns != expectedDNSNames[i] { -// t.Errorf("expected DNS name %s, but got %s", expectedDNSNames[i], dns) -// } -// } -// -// // verify the IP addresses -// expectedIPs := []net.IP{ -// net.ParseIP("::1"), -// net.IPv4(127, 0, 0, 1), -// net.ParseIP("10.96.0.1"), -// net.ParseIP("10.96.0.2"), -// } -// if len(altNames.IPs) != len(expectedIPs) { -// t.Fatalf("expected %d IPs, but got %d", len(expectedIPs), len(altNames.IPs)) -// } -// for i, ip := range altNames.IPs { -// if !ip.Equal(expectedIPs[i]) { -// t.Errorf("expected IP %v, but got %v", expectedIPs[i], ip) -// } -// } -//} diff --git a/pkg/kubenest/util/cert/store.go b/pkg/kubenest/util/cert/store.go deleted file mode 100644 index 9977c9f2a..000000000 --- a/pkg/kubenest/util/cert/store.go +++ /dev/null @@ -1,94 +0,0 @@ -package cert - -import ( - "fmt" - "strings" - - corev1 "k8s.io/api/core/v1" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -// nolint:revive -type CertStore interface { - AddCert(cert *VirtualClusterCert) - GetCert(name string) *VirtualClusterCert - CertList()
[]*VirtualClusterCert - LoadCertFromSecret(secret *corev1.Secret) error -} - -type splitToPairNameFunc func(name string) string - -type VirtualClusterCertStore struct { - certs map[string]*VirtualClusterCert - pairNameFunc splitToPairNameFunc -} - -func NewCertStore() CertStore { - return &VirtualClusterCertStore{ - certs: make(map[string]*VirtualClusterCert), - pairNameFunc: SplitToPairName, - } -} - -func SplitToPairName(name string) string { - if strings.Contains(name, constants.KeyExtension) { - strArr := strings.Split(name, constants.KeyExtension) - return strArr[0] - } - - if strings.Contains(name, constants.CertExtension) { - strArr := strings.Split(name, constants.CertExtension) - return strArr[0] - } - - return name -} - -func (store *VirtualClusterCertStore) AddCert(cert *VirtualClusterCert) { - store.certs[cert.pairName] = cert -} - -func (store *VirtualClusterCertStore) GetCert(name string) *VirtualClusterCert { - for _, c := range store.certs { - if c.pairName == name { - return c - } - } - return nil -} - -func (store *VirtualClusterCertStore) CertList() []*VirtualClusterCert { - certs := make([]*VirtualClusterCert, 0, len(store.certs)) - - for _, c := range store.certs { - certs = append(certs, c) - } - - return certs -} - -func (store *VirtualClusterCertStore) LoadCertFromSecret(secret *corev1.Secret) error { - if len(secret.Data) == 0 { - return fmt.Errorf("cert data is empty") - } - - for name, data := range secret.Data { - pairName := store.pairNameFunc(name) - kc := store.GetCert(pairName) - if kc == nil { - kc = &VirtualClusterCert{ - pairName: pairName, - } - } - if strings.Contains(name, constants.CertExtension) { - kc.cert = data - } - if strings.Contains(name, constants.KeyExtension) { - kc.key = data - } - store.AddCert(kc) - } - - return nil -} diff --git a/pkg/kubenest/util/cert/store_test.go b/pkg/kubenest/util/cert/store_test.go deleted file mode 100644 index 4d1ed48a1..000000000 --- a/pkg/kubenest/util/cert/store_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package cert - -import ( - "testing" - - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -func TestVirtualClusterCertStore_AddCert(t *testing.T) { - store := NewCertStore() - cert := &VirtualClusterCert{pairName: "test-cert", cert: []byte("test-cert-data"), key: []byte("test-key-data")} - store.AddCert(cert) - - // make sure the added certificate can be retrieved - retrievedCert := store.GetCert("test-cert") - assert.NotNil(t, retrievedCert, "Expected to retrieve the added certificate") - assert.Equal(t, cert, retrievedCert, "Retrieved certificate should match the added certificate") -} - -func TestVirtualClusterCertStore_GetCert(t *testing.T) { - store := NewCertStore() - cert := &VirtualClusterCert{pairName: "test-cert", cert: []byte("test-cert-data"), key: []byte("test-key-data")} - store.AddCert(cert) - - // retrieving an existing certificate - retrievedCert := store.GetCert("test-cert") - assert.NotNil(t, retrievedCert, "Expected to retrieve the added certificate") - assert.Equal(t, cert, retrievedCert, "Retrieved certificate should match the added certificate") - - // retrieving a nonexistent certificate - retrievedCert = store.GetCert("nonexistent-cert") - assert.Nil(t, retrievedCert, "Expected no certificate for a nonexistent name") -} - -func TestVirtualClusterCertStore_CertList(t *testing.T) { - store := NewCertStore() - cert1 := &VirtualClusterCert{pairName: "cert1"} - cert2 := &VirtualClusterCert{pairName: "cert2"} - store.AddCert(cert1) - store.AddCert(cert2) - - certs := store.CertList() - assert.Len(t, certs, 2, "Expected the certificate list to contain 2 certificates") - assert.Contains(t, certs, cert1, "Expected cert1 to be in the certificate list") - assert.Contains(t, certs, cert2, "Expected cert2 to be in the certificate list") -} - -func TestVirtualClusterCertStore_LoadCertFromSecret(t *testing.T) { - store := NewCertStore() - - // create a secret containing a certificate and a key - secret := &corev1.Secret{ - Data: map[string][]byte{ - "test-cert" + constants.CertExtension: []byte("test-cert-data"), - "test-cert" + constants.KeyExtension: []byte("test-key-data"), - }, - } - - // load the certs - err := store.LoadCertFromSecret(secret) - assert.NoError(t, err, "Expected no error when loading cert from secret") - - // make sure the certificate can be retrieved - cert := store.GetCert("test-cert") - assert.NotNil(t, cert, "Expected to retrieve the certificate after loading from secret") - assert.Equal(t, []byte("test-cert-data"), cert.cert, "Expected cert data to match") - assert.Equal(t, []byte("test-key-data"), cert.key, "Expected key data to match") -} - -func TestVirtualClusterCertStore_LoadCertFromEmptySecret(t *testing.T) { - store := NewCertStore() - - // create an empty secret - secret := &corev1.Secret{ - Data: map[string][]byte{}, - } - - // loading certs from it should return an error - err := store.LoadCertFromSecret(secret) - assert.Error(t, err, "Expected error when loading cert from empty secret") - assert.Equal(t, "cert data is empty", err.Error(), "Expected error message to match") -} diff --git a/pkg/kubenest/util/helper.go b/pkg/kubenest/util/helper.go deleted file mode 100644 index 771ac14c1..000000000 --- a/pkg/kubenest/util/helper.go +++ /dev/null @@ -1,492 +0,0 @@ -package util - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - utilyaml "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" - "k8s.io/klog/v2" - "sigs.k8s.io/yaml" -) - -func CreateOrUpdateService(client clientset.Interface, svc *v1.Service) error { - _, err := client.CoreV1().Services(svc.GetNamespace()).Create(context.TODO(), svc, metav1.CreateOptions{}) - if err != nil { - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - currentSvc, err := client.CoreV1().Services(svc.GetNamespace()).Get(context.TODO(), svc.GetName(), metav1.GetOptions{}) - if err != nil { - return err - } - - svc.ResourceVersion = currentSvc.ResourceVersion - - _, err = client.CoreV1().Services(svc.GetNamespace()).Update(context.TODO(), svc, metav1.UpdateOptions{}) - if err != nil { - return err - } - return nil - }); err != nil { - return err - } - } - - klog.V(5).InfoS("Successfully created or updated svc", "svc", svc.GetName()) - return nil -} - -func CreateOrUpdateDeployment(client clientset.Interface, deployment *appsv1.Deployment) error { - _, err := client.AppsV1().Deployments(deployment.GetNamespace()).Create(context.TODO(), deployment, metav1.CreateOptions{}) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - _, err := client.AppsV1().Deployments(deployment.GetNamespace()).Update(context.TODO(), deployment, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(5).InfoS("Successfully created or updated deployment",
"deployment", deployment.GetName()) - return nil -} - -func DeleteDeployment(client clientset.Interface, deployment string, namespace string) error { - err := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("Deployment %s/%s not found, skip delete", deployment, namespace) - return nil - } - return err - } - klog.V(2).Infof("Delete deployment %s/%s success", deployment, namespace) - return nil -} - -func CreateOrUpdateDaemonSet(client clientset.Interface, daemonSet *appsv1.DaemonSet) error { - _, err := client.AppsV1().DaemonSets(daemonSet.GetNamespace()).Create(context.TODO(), daemonSet, metav1.CreateOptions{}) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - _, err := client.AppsV1().DaemonSets(daemonSet.GetNamespace()).Update(context.TODO(), daemonSet, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(5).InfoS("Successfully created or updated daemonSet", "daemonSet", daemonSet.GetName()) - return nil -} - -func DeleteDaemonSet(client clientset.Interface, daemonSet string, namespace string) error { - err := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonSet, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("DaemonSet %s/%s not found, skip delete", daemonSet, namespace) - return nil - } - return err - } - klog.V(2).Infof("Delete daemonSet %s/%s success", daemonSet, namespace) - return nil -} - -func CreateOrUpdateServiceAccount(client clientset.Interface, serviceAccount *v1.ServiceAccount) error { - _, err := client.CoreV1().ServiceAccounts(serviceAccount.GetNamespace()).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - _, err := client.CoreV1().ServiceAccounts(serviceAccount.GetNamespace()).Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(5).InfoS("Successfully created or updated serviceAccount", "serviceAccount", serviceAccount.GetName()) - return nil -} - -func DeleteServiceAccount(client clientset.Interface, serviceAccount string, namespace string) error { - err := client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccount, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("ServiceAccount %s/%s not found, skip delete", serviceAccount, namespace) - return nil - } - return err - } - klog.V(2).Infof("Delete serviceAccount %s/%s success", serviceAccount, namespace) - return nil -} - -func CreateOrUpdateConfigMap(client clientset.Interface, configMap *v1.ConfigMap) error { - _, err := client.CoreV1().ConfigMaps(configMap.GetNamespace()).Create(context.TODO(), configMap, metav1.CreateOptions{}) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - _, err := client.CoreV1().ConfigMaps(configMap.GetNamespace()).Update(context.TODO(), configMap, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(5).InfoS("Successfully created or updated configMap", "configMap", configMap.GetName()) - return nil -} - -func DeleteConfigmap(client clientset.Interface, cm string, namespace string) error { - err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), cm, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("Configmap %s/%s not found, skip delete", cm, 
namespace) - return nil - } - return err - } - klog.V(2).Infof("Delete configmap %s/%s success", cm, namespace) - return nil -} - -func CreateOrUpdateStatefulSet(client clientset.Interface, statefulSet *appsv1.StatefulSet) error { - _, err := client.AppsV1().StatefulSets(statefulSet.GetNamespace()).Create(context.TODO(), statefulSet, metav1.CreateOptions{}) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - older, err := client.AppsV1().StatefulSets(statefulSet.GetNamespace()).Get(context.TODO(), statefulSet.GetName(), metav1.GetOptions{}) - if err != nil { - return err - } - - statefulSet.ResourceVersion = older.ResourceVersion - _, err = client.AppsV1().StatefulSets(statefulSet.GetNamespace()).Update(context.TODO(), statefulSet, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(5).InfoS("Successfully created or updated statefulset", "statefulset", statefulSet.GetName()) - return nil -} - -func DeleteStatefulSet(client clientset.Interface, sts string, namespace string) error { - err := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), sts, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("Statefulset %s/%s not found, skip delete", sts, namespace) - return nil - } - return err - } - klog.V(2).Infof("Delete statefulset %s/%s success", sts, namespace) - return nil -} - -func CreateOrUpdateClusterSA(client clientset.Interface, serviceAccount *v1.ServiceAccount, namespace string) error { - _, err := client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}) - - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - older, err := client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), serviceAccount.GetName(), metav1.GetOptions{}) - if err != nil { - return err - } - - serviceAccount.ResourceVersion = older.ResourceVersion - _, err = client.CoreV1().ServiceAccounts(namespace).Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(4).InfoS("Successfully created or updated serviceAccount", "serviceAccount", serviceAccount.GetName()) - return nil -} - -func CreateOrUpdateClusterRole(client clientset.Interface, clusterrole *rbacv1.ClusterRole) error { - _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterrole, metav1.CreateOptions{}) - - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - older, err := client.RbacV1().ClusterRoles().Get(context.TODO(), clusterrole.GetName(), metav1.GetOptions{}) - if err != nil { - return err - } - - clusterrole.ResourceVersion = older.ResourceVersion - _, err = client.RbacV1().ClusterRoles().Update(context.TODO(), clusterrole, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(4).InfoS("Successfully created or updated clusterrole", "clusterrole", clusterrole.GetName()) - return nil -} - -func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterroleBinding *rbacv1.ClusterRoleBinding) error { - _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterroleBinding, metav1.CreateOptions{}) - - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - - older, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), clusterroleBinding.GetName(), metav1.GetOptions{}) - if err != nil { - return err - } - - clusterroleBinding.ResourceVersion = older.ResourceVersion - _, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterroleBinding, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - klog.V(4).InfoS("Successfully created or updated clusterrolebinding", "clusterrolebinding", clusterroleBinding.GetName()) - return nil -} - -func CreateObject(dynamicClient dynamic.Interface, namespace string, name string, obj *unstructured.Unstructured) error { - gvk := obj.GroupVersionKind() - gvr, _ := meta.UnsafeGuessKindToResource(gvk) - klog.V(2).Infof("Create %s, name: %s, namespace: %s", gvr.String(), name, namespace) - _, err := dynamicClient.Resource(gvr).Namespace(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) - if err != nil { - if apierrors.IsAlreadyExists(err) { - klog.Warningf("%s %s already exists", gvr.String(), name) - return nil - } - return err - } - return nil -} - -func ApplyObject(dynamicClient dynamic.Interface, obj *unstructured.Unstructured) error { - gvk := obj.GroupVersionKind() - gvr, _ := meta.UnsafeGuessKindToResource(gvk) - namespace := obj.GetNamespace() - name := obj.GetName() - - klog.V(2).Infof("Apply %s, name: %s, namespace: %s", gvr.String(), name, namespace) - - resourceClient := dynamicClient.Resource(gvr).Namespace(namespace) - - // Get the existing resource - existingObj, err := resourceClient.Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - // If not found, create the resource - _, err = resourceClient.Create(context.TODO(), obj, metav1.CreateOptions{}) - if err != nil { - return err - } - klog.V(2).Infof("Created %s %s in namespace %s", gvr.String(), name, namespace) - return nil - } - return err - } - - // If found, apply changes using Server-Side Apply - obj.SetResourceVersion(existingObj.GetResourceVersion()) - _, err = resourceClient.Apply(context.TODO(), name, obj, metav1.ApplyOptions{ - FieldManager: "vc-operator-manager", - Force: true, - }) - if err != nil { - klog.V(2).Infof("Failed to apply changes to %s %s: %v", gvr.String(), name, err) - return fmt.Errorf("failed to apply changes to %s %s: %v", gvr.String(), name, err) - } - - klog.V(2).Infof("Applied changes to %s %s in namespace %s", gvr.String(), name, namespace) - return nil -} - -func DeleteObject(dynamicClient dynamic.Interface, namespace string, name string, obj *unstructured.Unstructured) error { - gvk := obj.GroupVersionKind() - gvr, _ := meta.UnsafeGuessKindToResource(gvk) - klog.V(2).Infof("Delete %s, name: %s, namespace: %s", gvr.String(), name, namespace) - err := dynamicClient.Resource(gvr).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - klog.Warningf("%s %s already deleted", gvr.String(), name) - return nil - } - return err - } - return nil -}
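As an editorial aside: the trio just shown (CreateObject / ApplyObject / DeleteObject) pushes arbitrary manifests through the dynamic client, with ApplyObject falling back from create to server-side apply. A minimal, hypothetical sketch of how such a helper is driven follows; the kubeconfig path and ConfigMap payload are assumptions, and the util import only resolved before this PR removed the package.

```go
package main

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"

	"github.com/kosmos.io/kosmos/pkg/kubenest/util" // deleted by this PR
)

func main() {
	// Assumed kubeconfig location; any reachable cluster works for the sketch.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/admin.conf")
	if err != nil {
		panic(err)
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// A minimal unstructured object; the operator fed rendered YAML instead.
	cm := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
		"data":       map[string]interface{}{"key": "value"},
	}}
	// The first call takes the create path; repeat calls go through
	// server-side apply with the "vc-operator-manager" field manager.
	if err := util.ApplyObject(dyn, cm); err != nil {
		panic(err)
	}
}
```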
-// DecodeYAML unmarshals a YAML document or multidoc YAML as unstructured -// objects, placing each decoded object into a channel. -// code from https://github.com/kubernetes/client-go/issues/216 -func DecodeYAML(data []byte) (<-chan *unstructured.Unstructured, <-chan error) { - var ( - chanErr = make(chan error) - chanObj = make(chan *unstructured.Unstructured) - multidocReader = utilyaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(data))) - ) - - go func() { - defer close(chanErr) - defer close(chanObj) - - // Iterate over the data until Read returns io.EOF. Every successful - // read returns a complete YAML document. - for { - buf, err := multidocReader.Read() - if err != nil { - if err == io.EOF { - return - } - klog.Warningf("failed to read yaml data") - chanErr <- errors.Wrap(err, "failed to read yaml data") - return - } - - // Do not use this YAML doc if it is unkind. - var typeMeta runtime.TypeMeta - if err := yaml.Unmarshal(buf, &typeMeta); err != nil { - continue - } - if typeMeta.Kind == "" { - continue - } - - // Define the unstructured object into which the YAML document will be - // unmarshaled. - obj := &unstructured.Unstructured{ - Object: map[string]interface{}{}, - } - - // Unmarshal the YAML document into the unstructured object. - if err := yaml.Unmarshal(buf, &obj.Object); err != nil { - klog.Warningf("failed to unmarshal yaml data") - chanErr <- errors.Wrap(err, "failed to unmarshal yaml data") - return - } - - // Place the unstructured object into the channel. - chanObj <- obj - } - }() - - return chanObj, chanErr -} - -// ForEachObjectInYAMLActionFunc is a function that is executed against each -// object found in a YAML document. -// When a non-empty namespace is provided then the object is assigned the -// namespace prior to any other actions being performed with or to the object. -type ForEachObjectInYAMLActionFunc func(context.Context, dynamic.Interface, *unstructured.Unstructured) error - -// ForEachObjectInYAML executes actionFn for each object in the provided YAML. -// If an error is returned then no further objects are processed. -// The data may be a single YAML document or multidoc YAML. -// When a non-empty namespace is provided then all objects are assigned -// the namespace prior to any other actions being performed with or to the -// object. -func ForEachObjectInYAML( - ctx context.Context, - dynamicClient dynamic.Interface, - data []byte, - namespace string, - actionFn ForEachObjectInYAMLActionFunc) error { - chanObj, chanErr := DecodeYAML(data) - for { - select { - case obj := <-chanObj: - if obj == nil { - return nil - } - if namespace != "" { - obj.SetNamespace(namespace) - } - klog.Infof("get object %s/%s", obj.GetNamespace(), obj.GetName()) - if err := actionFn(ctx, dynamicClient, obj); err != nil { - return err - } - case err := <-chanErr: - if err == nil { - return nil - } - klog.Errorf("DecodeYaml error %v", err) - return errors.Wrap(err, "received error while decoding yaml") - } - } -} - -func ReplaceObject(dynamicClient dynamic.Interface, obj *unstructured.Unstructured) error { - gvk := obj.GroupVersionKind() - gvr, _ := meta.UnsafeGuessKindToResource(gvk) - namespace := obj.GetNamespace() - name := obj.GetName() - - klog.V(2).Infof("Replace %s, name: %s, namespace: %s", gvr.String(), name, namespace) - - resourceClient := dynamicClient.Resource(gvr).Namespace(namespace) - - // Get the existing resource - _, err := resourceClient.Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - // If not found, create the resource - _, err = resourceClient.Create(context.TODO(), obj, metav1.CreateOptions{}) - if err != nil { - return err - } - klog.V(2).Infof("Created %s %s in namespace %s", gvr.String(), name, namespace) - return nil - } - return err - } - - // If found, delete the existing resource - err = resourceClient.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - klog.V(2).Infof("Failed to delete existing %s %s: %v", gvr.String(), name, err) - return fmt.Errorf("failed to delete existing %s %s: %v", gvr.String(), name, err) - } - - klog.V(2).Infof("Deleted existing %s %s in namespace %s", gvr.String(), name, namespace) - - // Create the resource with the new object - _, err = resourceClient.Create(context.TODO(), obj, metav1.CreateOptions{}) - if err != nil { - klog.V(2).Infof("Failed to create %s %s: %v", gvr.String(), name, err) - return fmt.Errorf("failed to create %s %s: %v", gvr.String(), name, err) - } - - klog.V(2).Infof("Replaced %s %s in namespace %s", gvr.String(), name, namespace) - return nil -} diff --git a/pkg/kubenest/util/helper_test.go b/pkg/kubenest/util/helper_test.go deleted file mode 100644 index c1d58d13c..000000000 --- a/pkg/kubenest/util/helper_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package util - -import ( - "context" - "fmt" - "testing" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" -) - -// createKubeConfig creates a kubeConfig from the given config and masterOverride. -func createKubeConfig() (*restclient.Config, error) { - kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: "../../../ignore_dir/local.conf"}, - &clientcmd.ConfigOverrides{}).ClientConfig() - if err != nil { - return nil, err - } - - kubeConfig.DisableCompression = true - kubeConfig.QPS = 40.0 - kubeConfig.Burst = 60 - - return kubeConfig, nil -} - -func prepare() (kubernetes.Interface, error) { - // Prepare kube config. - kubeConfig, err := createKubeConfig() - if err != nil { - return nil, err - } - - hostKubeClient, err := kubernetes.NewForConfig(kubeConfig) - if err != nil { - return nil, fmt.Errorf("could not create clientset: %v", err) - } - - return hostKubeClient, nil -} - -func TestCreateOrUpdate(t *testing.T) { - client, err := prepare() - if err != nil { - t.Logf("failed to prepare client: %v", err) - return - } - - tests := []struct { - name string - input *v1.Service - want bool - }{ - { - name: "basic", - input: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-nodeport-service", - Namespace: "default", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - Selector: map[string]string{ - "app": "my-app", - }, - Ports: []v1.ServicePort{ - { - Port: 30007, // service port - Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{ - IntVal: 8080, // target port in the pod - }, - NodePort: 30007, // fixed NodePort - }, - }, - }, - }, - want: true, - }, - { - name: "same port", - input: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-nodeport-service", - Namespace: "default", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - Selector: map[string]string{ - "app": "my-app", - }, - Ports: []v1.ServicePort{ - { - Port: 30007, // service port - Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{ - IntVal: 8080, // target port in the pod - }, - NodePort: 30007, // fixed NodePort - }, - }, - }, - }, - want: true, - }, - { - name: "different port", - input: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-nodeport-service", - Namespace: "default", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - Selector: map[string]string{ - "app": "my-app", - }, - Ports: []v1.ServicePort{ - { - Port: 30077, // service port - Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{ - IntVal: 8080, // target port in the pod - }, - NodePort: 30077, // fixed NodePort - }, - }, - }, - }, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := CreateOrUpdateService(client, tt.input) - if err != nil { - t.Fatalf("CreateOrUpdateService() error = %v", err) - } - }) - } -} - -func TestCreateSvc(t *testing.T) { - client, err := prepare() - if err != nil { - t.Logf("failed to prepare client: %v", err) - return - } - - tests := []struct { - name string - input *v1.Service - update *v1.Service - want bool - }{ - { - name: "ipv4 only", - input: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-nodeport-service", - Namespace: "default", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - Selector: map[string]string{ - "app": "my-app", - }, - Ports: []v1.ServicePort{ - { - Port: 30007, // service port - Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{ - IntVal: 8080, // target port in the pod - }, - // NodePort: 30007, // fixed NodePort - }, - }, - }, - }, - update: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-nodeport-service", - Namespace: "default", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - Selector: map[string]string{ - "app": "my-app", - }, - Ports: []v1.ServicePort{ - { - Port: 30007, // service port - Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{ - IntVal: 8080, // target port in the pod - }, - // NodePort: 30007, // fixed NodePort - }, - }, - }, - }, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := CreateOrUpdateService(client, tt.input) - if err != nil { - t.Fatalf("CreateOrUpdateService() error = %v", err) - } - svc, err := client.CoreV1().Services(tt.input.Namespace).Get(context.TODO(), tt.input.Name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("CreateOrUpdateService() error = %v", err) - } - nodePort := svc.Spec.Ports[0].NodePort - tt.update.Spec.Ports[0].NodePort = nodePort - tt.update.Spec.Ports[0].Port = nodePort - tt.update.Spec.Ports[0].TargetPort = intstr.IntOrString{ - IntVal: nodePort, - } - err = CreateOrUpdateService(client, tt.update) - if err != nil { - t.Fatalf("CreateOrUpdateService() error = %v", err) - } - }) - } -} diff --git a/pkg/kubenest/util/image.go b/pkg/kubenest/util/image.go deleted file mode 100644 index c7cfcd307..000000000 --- a/pkg/kubenest/util/image.go +++ /dev/null @@ -1,36 +0,0 @@ -package util - -import ( - "os" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -func GetImageMessage() (imageRepository string, imageVersion string) { - imageRepository = os.Getenv(constants.DefaultImageRepositoryEnv) - if len(imageRepository) == 0 { - imageRepository = utils.DefaultImageRepository - } - imageVersion = os.Getenv(constants.DefaultImageVersionEnv) - if len(imageVersion) == 0 { - imageVersion = utils.DefaultImageVersion - } - return imageRepository, imageVersion -} - -func GetCoreDNSImageTag() string { - coreDNSImageTag := os.Getenv(constants.DefaultCoreDNSImageTagEnv) - if coreDNSImageTag == "" { - coreDNSImageTag = utils.DefaultCoreDNSImageTag - } - return coreDNSImageTag -} - -func GetVirtualControllerLabel() string { - lb := os.Getenv(constants.DefaultVirtualControllerLabelEnv) - if len(lb) == 0 { - return utils.LabelNodeRoleControlPlane - } - return lb -} diff --git a/pkg/kubenest/util/image_test.go b/pkg/kubenest/util/image_test.go deleted file mode 100644 index eddb9622e..000000000 --- a/pkg/kubenest/util/image_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package util - -import ( - "os" - "testing" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -func TestGetImageMessage(t *testing.T) { - // Set up environment variables for the test -
defaultRepo := "custom-repo" - defaultVersion := "custom-version" - - os.Setenv(constants.DefaultImageRepositoryEnv, defaultRepo) - os.Setenv(constants.DefaultImageVersionEnv, defaultVersion) - - defer func() { - // Cleanup environment variables after test - os.Unsetenv(constants.DefaultImageRepositoryEnv) - os.Unsetenv(constants.DefaultImageVersionEnv) - }() - - // Test case where env variables are set - repo, version := GetImageMessage() - if repo != defaultRepo { - t.Errorf("GetImageMessage() repo = %v, want %v", repo, defaultRepo) - } - if version != defaultVersion { - t.Errorf("GetImageMessage() version = %v, want %v", version, defaultVersion) - } - - // Test case where env variables are not set - os.Unsetenv(constants.DefaultImageRepositoryEnv) - os.Unsetenv(constants.DefaultImageVersionEnv) - - repo, version = GetImageMessage() - if repo != utils.DefaultImageRepository { - t.Errorf("GetImageMessage() repo = %v, want %v", repo, utils.DefaultImageRepository) - } - if version != utils.DefaultImageVersion { - t.Errorf("GetImageMessage() version = %v, want %v", version, utils.DefaultImageVersion) - } -} - -func TestGetCoreDNSImageTag(t *testing.T) { - // Set up environment variable for the test - defaultCoreDNSImageTag := "custom-coredns-tag" - os.Setenv(constants.DefaultCoreDNSImageTagEnv, defaultCoreDNSImageTag) - - defer func() { - // Cleanup environment variable after test - os.Unsetenv(constants.DefaultCoreDNSImageTagEnv) - }() - - // Test case where env variable is set - coreDNSImageTag := GetCoreDNSImageTag() - if coreDNSImageTag != defaultCoreDNSImageTag { - t.Errorf("GetCoreDNSImageTag() = %v, want %v", coreDNSImageTag, defaultCoreDNSImageTag) - } - - // Test case where env variable is not set - os.Unsetenv(constants.DefaultCoreDNSImageTagEnv) - coreDNSImageTag = GetCoreDNSImageTag() - if coreDNSImageTag != utils.DefaultCoreDNSImageTag { - t.Errorf("GetCoreDNSImageTag() = %v, want %v", coreDNSImageTag, utils.DefaultCoreDNSImageTag) - } -} - -func TestGetVirtualControllerLabel(t *testing.T) { - // Set up environment variable for the test - defaultLabel := "custom-label" - os.Setenv(constants.DefaultVirtualControllerLabelEnv, defaultLabel) - - defer func() { - // Cleanup environment variable after test - os.Unsetenv(constants.DefaultVirtualControllerLabelEnv) - }() - - // Test case where env variable is set - label := GetVirtualControllerLabel() - if label != defaultLabel { - t.Errorf("GetVirtualControllerLabel() = %v, want %v", label, defaultLabel) - } - - // Test case where env variable is not set - os.Unsetenv(constants.DefaultVirtualControllerLabelEnv) - label = GetVirtualControllerLabel() - if label != utils.LabelNodeRoleControlPlane { - t.Errorf("GetVirtualControllerLabel() = %v, want %v", label, utils.LabelNodeRoleControlPlane) - } -} diff --git a/pkg/kubenest/util/kubeconfig.go b/pkg/kubenest/util/kubeconfig.go deleted file mode 100644 index 8536a9c47..000000000 --- a/pkg/kubenest/util/kubeconfig.go +++ /dev/null @@ -1,38 +0,0 @@ -package util - -import ( - "fmt" - - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -func CreateWithCerts(serverURL, clusterName, userName string, caCert []byte, clientKey []byte, clientCert []byte) *clientcmdapi.Config { - config := CreateBasic(serverURL, clusterName, userName, caCert) - config.AuthInfos[userName] = &clientcmdapi.AuthInfo{ - ClientKeyData: clientKey, - ClientCertificateData: clientCert, - } - return config -} - -func CreateBasic(serverURL, clusterName, userName string, caCert []byte) *clientcmdapi.Config { - // Use the 
cluster and the username as the context name - contextName := fmt.Sprintf("%s@%s", userName, clusterName) - - return &clientcmdapi.Config{ - Clusters: map[string]*clientcmdapi.Cluster{ - clusterName: { - Server: serverURL, - CertificateAuthorityData: caCert, - }, - }, - Contexts: map[string]*clientcmdapi.Context{ - contextName: { - Cluster: clusterName, - AuthInfo: userName, - }, - }, - AuthInfos: map[string]*clientcmdapi.AuthInfo{}, - CurrentContext: contextName, - } -} diff --git a/pkg/kubenest/util/kubeconfig_test.go b/pkg/kubenest/util/kubeconfig_test.go deleted file mode 100644 index e92c9fad3..000000000 --- a/pkg/kubenest/util/kubeconfig_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package util - -import ( - "reflect" - "testing" -) - -func TestCreateBasic(t *testing.T) { - serverURL := "https://test-server" - clusterName := "test-cluster" - userName := "test-user" - caCert := []byte("test-ca-cert") - - config := CreateBasic(serverURL, clusterName, userName, caCert) - - // Check if the returned config is as expected - if config.CurrentContext != "test-user@test-cluster" { - t.Errorf("CreateBasic() CurrentContext = %v, want %v", config.CurrentContext, "test-user@test-cluster") - } - - if cluster, ok := config.Clusters[clusterName]; !ok { - t.Errorf("CreateBasic() missing cluster %v", clusterName) - } else { - if cluster.Server != serverURL { - t.Errorf("CreateBasic() cluster.Server = %v, want %v", cluster.Server, serverURL) - } - if !reflect.DeepEqual(cluster.CertificateAuthorityData, caCert) { - t.Errorf("CreateBasic() cluster.CertificateAuthorityData = %v, want %v", cluster.CertificateAuthorityData, caCert) - } - } - - if ctx, ok := config.Contexts["test-user@test-cluster"]; !ok { - t.Errorf("CreateBasic() missing context %v", "test-user@test-cluster") - } else { - if ctx.Cluster != clusterName { - t.Errorf("CreateBasic() ctx.Cluster = %v, want %v", ctx.Cluster, clusterName) - } - if ctx.AuthInfo != userName { - t.Errorf("CreateBasic() ctx.AuthInfo = %v, want %v", ctx.AuthInfo, userName) - } - } -} - -func TestCreateWithCerts(t *testing.T) { - serverURL := "https://test-server" - clusterName := "test-cluster" - userName := "test-user" - caCert := []byte("test-ca-cert") - clientKey := []byte("test-client-key") - clientCert := []byte("test-client-cert") - - config := CreateWithCerts(serverURL, clusterName, userName, caCert, clientKey, clientCert) - - // Validate the basic config part - if config.CurrentContext != "test-user@test-cluster" { - t.Errorf("CreateWithCerts() CurrentContext = %v, want %v", config.CurrentContext, "test-user@test-cluster") - } - - // Validate AuthInfo part - if authInfo, ok := config.AuthInfos[userName]; !ok { - t.Errorf("CreateWithCerts() missing AuthInfo for %v", userName) - } else { - if !reflect.DeepEqual(authInfo.ClientKeyData, clientKey) { - t.Errorf("CreateWithCerts() authInfo.ClientKeyData = %v, want %v", authInfo.ClientKeyData, clientKey) - } - if !reflect.DeepEqual(authInfo.ClientCertificateData, clientCert) { - t.Errorf("CreateWithCerts() authInfo.ClientCertificateData = %v, want %v", authInfo.ClientCertificateData, clientCert) - } - } -} diff --git a/pkg/kubenest/util/name.go b/pkg/kubenest/util/name.go deleted file mode 100644 index da1c03f88..000000000 --- a/pkg/kubenest/util/name.go +++ /dev/null @@ -1,43 +0,0 @@ -package util - -import ( - "fmt" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -func GetAPIServerName(name string) string { - return fmt.Sprintf("%s-%s", name, "apiserver") -} - -func GetEtcdClientServerName(name string) 
string { - return fmt.Sprintf("%s-%s", name, "etcd-client") -} - -func GetKonnectivityServerName(name string) string { - return fmt.Sprintf("%s-%s", name, "konnectivity-server") -} - -func GetKonnectivityAPIServerName(name string) string { - return fmt.Sprintf("%s-%s-konnectivity", name, "apiserver") -} - -func GetEtcdServerName(name string) string { - return fmt.Sprintf("%s-%s", name, "etcd") -} - -func GetCertName(name string) string { - return fmt.Sprintf("%s-%s", name, "cert") -} - -func GetEtcdCertName(name string) string { - return fmt.Sprintf("%s-%s", name, "etcd-cert") -} - -func GetAdminConfigSecretName(name string) string { - return fmt.Sprintf("%s-%s", name, constants.AdminConfig) -} - -func GetAdminConfigClusterIPSecretName(name string) string { - return fmt.Sprintf("%s-%s", name, "admin-config-clusterip") -} diff --git a/pkg/kubenest/util/name_test.go b/pkg/kubenest/util/name_test.go deleted file mode 100644 index 960b93b12..000000000 --- a/pkg/kubenest/util/name_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package util - -import ( - "testing" - - "github.com/kosmos.io/kosmos/pkg/kubenest/constants" -) - -func TestGetAPIServerName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-apiserver" - if result := GetAPIServerName(name); result != expected { - t.Errorf("GetAPIServerName() = %v, want %v", result, expected) - } -} - -func TestGetEtcdClientServerName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-etcd-client" - if result := GetEtcdClientServerName(name); result != expected { - t.Errorf("GetEtcdClientServerName() = %v, want %v", result, expected) - } -} - -func TestGetKonnectivityServerName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-konnectivity-server" - if result := GetKonnectivityServerName(name); result != expected { - t.Errorf("GetKonnectivityServerName() = %v, want %v", result, expected) - } -} - -func TestGetKonnectivityAPIServerName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-apiserver-konnectivity" - if result := GetKonnectivityAPIServerName(name); result != expected { - t.Errorf("GetKonnectivityAPIServerName() = %v, want %v", result, expected) - } -} - -func TestGetEtcdServerName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-etcd" - if result := GetEtcdServerName(name); result != expected { - t.Errorf("GetEtcdServerName() = %v, want %v", result, expected) - } -} - -func TestGetCertName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-cert" - if result := GetCertName(name); result != expected { - t.Errorf("GetCertName() = %v, want %v", result, expected) - } -} - -func TestGetEtcdCertName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-etcd-cert" - if result := GetEtcdCertName(name); result != expected { - t.Errorf("GetEtcdCertName() = %v, want %v", result, expected) - } -} - -func TestGetAdminConfigSecretName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-" + constants.AdminConfig - if result := GetAdminConfigSecretName(name); result != expected { - t.Errorf("GetAdminConfigSecretName() = %v, want %v", result, expected) - } -} - -func TestGetAdminConfigClusterIPSecretName(t *testing.T) { - name := "test-cluster" - expected := "test-cluster-admin-config-clusterip" - if result := GetAdminConfigClusterIPSecretName(name); result != expected { - t.Errorf("GetAdminConfigClusterIPSecretName() = %v, want %v", result, expected) - } -} diff --git a/pkg/kubenest/util/node.go b/pkg/kubenest/util/node.go 
deleted file mode 100644 index faa7c8af6..000000000 --- a/pkg/kubenest/util/node.go +++ /dev/null @@ -1,111 +0,0 @@ -package util - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - drain "k8s.io/kubectl/pkg/drain" -) - -func IsNodeReady(conditions []v1.NodeCondition) bool { - for _, condition := range conditions { - if condition.Type == v1.NodeReady && condition.Status == v1.ConditionTrue { - return true - } - } - return false -} - -// DrainNode cordons and drains a node. -func DrainNode(ctx context.Context, nodeName string, client kubernetes.Interface, node *v1.Node, drainWaitSeconds int, isHostCluster bool) error { - if client == nil { - return fmt.Errorf("K8sClient not set") - } - if node == nil { - return fmt.Errorf("node not set") - } - if nodeName == "" { - return fmt.Errorf("node name not set") - } - helper := &drain.Helper{ - Ctx: ctx, - Client: client, - Force: true, - GracePeriodSeconds: -1, - IgnoreAllDaemonSets: true, - Out: os.Stdout, - ErrOut: os.Stdout, - DisableEviction: !isHostCluster, - // We want to proceed even when pods are using emptyDir volumes - DeleteEmptyDirData: true, - Timeout: time.Duration(drainWaitSeconds) * time.Second, - } - if err := drain.RunCordonOrUncordon(helper, node, true); err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return fmt.Errorf("error cordoning node: %v", err) - } - if err := drain.RunNodeDrain(helper, nodeName); err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return fmt.Errorf("error draining node: %v", err) - } - return nil -} - -func GetAPIServerNodes(rootClientSet kubernetes.Interface, namespace string) (*v1.NodeList, error) { - klog.V(4).Info("begin to get API server nodes") - - apiServerPods, err := rootClientSet.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: "virtualCluster-app=apiserver", - }) - if err != nil { - klog.Errorf("failed to list kube-apiserver pod: %v", err) - return nil, errors.Wrap(err, "failed to list kube-apiserver pods") - } - - var nodeNames []string - for _, pod := range apiServerPods.Items { - klog.V(4).Infof("API server pod %s is on node: %s", pod.Name, pod.Spec.NodeName) - nodeNames = append(nodeNames, pod.Spec.NodeName) - } - - if len(nodeNames) == 0 { - klog.Errorf("no API server pods found in the namespace") - return nil, fmt.Errorf("no API server pods found") - } - - var nodesList []v1.Node - for _, nodeName := range nodeNames { - node, err := rootClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) - if err != nil { - klog.Errorf("failed to get node %s: %v", nodeName, err) - return nil, fmt.Errorf("failed to get node %s: %v", nodeName, err) - } - klog.V(4).Infof("Found node: %s", node.Name) - nodesList = append(nodesList, *node) - } - - nodes := &v1.NodeList{ - Items: nodesList, - } - - klog.V(4).Infof("got %d API server nodes", len(nodes.Items)) - - if len(nodes.Items) == 0 { - klog.Errorf("no nodes found for the API server pods") - return nil, fmt.Errorf("no nodes found for the API server pods") - } - - return nodes, nil -} diff --git a/pkg/kubenest/util/node_test.go b/pkg/kubenest/util/node_test.go deleted file mode 100644 index 90305351d..000000000 --- a/pkg/kubenest/util/node_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package util - -import ( - "context" - "errors" - "testing" - - 
"github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/fake" - clientTesting "k8s.io/client-go/testing" -) - -// TestIsNodeReady tests the IsNodeReady function -func TestIsNodeReady(t *testing.T) { - tests := []struct { - name string - conditions []v1.NodeCondition - want bool - }{ - { - name: "node is ready", - conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - Status: v1.ConditionTrue, - }, - }, - want: true, - }, - { - name: "node is not ready", - conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - Status: v1.ConditionFalse, - }, - }, - want: false, - }, - { - name: "no conditions", - conditions: []v1.NodeCondition{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := IsNodeReady(tt.conditions) - if got != tt.want { - t.Errorf("IsNodeReady() = %v, want %v", got, tt.want) - } - }) - } -} - -// TestDrainNode tests the DrainNode function -func TestDrainNode(t *testing.T) { - fakeNodeName := "fake-node" - fakeNode := &v1.Node{ - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - Status: v1.ConditionTrue, - }, - }, - }, - } - - ctx := context.TODO() - fakeClient := fake.NewSimpleClientset(fakeNode) - - tests := []struct { - name string - nodeName string - client *fake.Clientset - node *v1.Node - drainWaitSecs int - isHostCluster bool - wantErr bool - prepare func() - }{ - { - name: "successful drain and cordon", - nodeName: fakeNodeName, - client: fakeClient, - node: fakeNode, - drainWaitSecs: 30, - isHostCluster: true, - wantErr: false, - }, - //{ - // name: "missing client", - // nodeName: fakeNodeName, - // client: nil, - // node: fakeNode, - // drainWaitSecs: 30, - // isHostCluster: true, - // wantErr: true, - //}, - { - name: "missing node", - nodeName: fakeNodeName, - client: fakeClient, - node: nil, - drainWaitSecs: 30, - isHostCluster: true, - wantErr: true, - }, - { - name: "missing node name", - nodeName: "", - client: fakeClient, - node: fakeNode, - drainWaitSecs: 30, - isHostCluster: true, - wantErr: true, - }, - { - name: "node not found error", - nodeName: "non-existent-node", - client: fakeClient, - node: fakeNode, - drainWaitSecs: 30, - isHostCluster: true, - wantErr: false, - prepare: func() { - fakeClient.Fake.PrependReactor("get", "nodes", func(action clientTesting.Action) (bool, runtime.Object, error) { - return true, nil, apierrors.NewNotFound(v1.Resource("nodes"), "non-existent-node") - }) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.prepare != nil { - tt.prepare() - } - - err := DrainNode(ctx, tt.nodeName, tt.client, tt.node, tt.drainWaitSecs, tt.isHostCluster) - if (err != nil) != tt.wantErr { - t.Errorf("DrainNode() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestGetAPIServerNodes(t *testing.T) { - namespace := "test-namespace" - - t.Run("Successfully Get API Server Nodes", func(t *testing.T) { - apiServerPod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "apiserver-pod-1", - Namespace: namespace, - Labels: map[string]string{"virtualCluster-app": "apiserver"}, - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - } - apiServerNode := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Status: v1.NodeStatus{ - Addresses: []v1.NodeAddress{ - {Type: v1.NodeInternalIP, Address: "192.168.1.10"}, - }, - }, - } - - 
-func TestGetAPIServerNodes(t *testing.T) { - namespace := "test-namespace" - - t.Run("Successfully Get API Server Nodes", func(t *testing.T) { - apiServerPod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "apiserver-pod-1", - Namespace: namespace, - Labels: map[string]string{"virtualCluster-app": "apiserver"}, - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - } - apiServerNode := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Status: v1.NodeStatus{ - Addresses: []v1.NodeAddress{ - {Type: v1.NodeInternalIP, Address: "192.168.1.10"}, - }, - }, - } - - client := fake.NewSimpleClientset(apiServerPod, apiServerNode) - - nodes, err := GetAPIServerNodes(client, namespace) - assert.NoError(t, err, "Should successfully get API server nodes") - assert.Len(t, nodes.Items, 1, "Expected exactly one node") - assert.Equal(t, "node1", nodes.Items[0].Name, "Node name should match") - }) - - t.Run("No API Server Pods Found", func(t *testing.T) { - client := fake.NewSimpleClientset() - - nodes, err := GetAPIServerNodes(client, namespace) - assert.Error(t, err, "Should fail when no API server pods are found") - assert.Contains(t, err.Error(), "no API server pods found", "Error message should match") - assert.Nil(t, nodes, "Nodes should be nil when no API server pods are found") - }) - - t.Run("Error Listing API Server Pods", func(t *testing.T) { - client := fake.NewSimpleClientset() - client.PrependReactor("list", "pods", func(action clientTesting.Action) (bool, runtime.Object, error) { - return true, nil, errors.New("mock error: failed to list pods") - }) - - nodes, err := GetAPIServerNodes(client, namespace) - assert.Error(t, err, "Should fail when listing pods returns an error") - assert.Contains(t, err.Error(), "failed to list kube-apiserver pods", "Error message should match") - assert.Nil(t, nodes, "Nodes should be nil when pod listing fails") - }) - - t.Run("Error Fetching Node Information", func(t *testing.T) { - apiServerPod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "apiserver-pod-1", - Namespace: namespace, - Labels: map[string]string{"virtualCluster-app": "apiserver"}, - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - } - - client := fake.NewSimpleClientset(apiServerPod) - client.PrependReactor("get", "nodes", func(action clientTesting.Action) (bool, runtime.Object, error) { - return true, nil, errors.New("mock error: failed to get node") - }) - - nodes, err := GetAPIServerNodes(client, namespace) - assert.Error(t, err, "Should fail when fetching node information returns an error") - assert.Contains(t, err.Error(), "failed to get node", "Error message should match") - assert.Nil(t, nodes, "Nodes should be nil when node fetching fails") - }) - - t.Run("Pod Exists but Node Not Found", func(t *testing.T) { - apiServerPod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "apiserver-pod-1", - Namespace: namespace, - Labels: map[string]string{"virtualCluster-app": "apiserver"}, - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - } - - client := fake.NewSimpleClientset(apiServerPod) - - nodes, err := GetAPIServerNodes(client, namespace) - assert.Error(t, err, "Should fail when node does not exist") - assert.Contains(t, err.Error(), "failed to get node", "Error message should match") - assert.Nil(t, nodes, "Nodes should be nil when node is not found") - }) -} diff --git a/pkg/kubenest/util/template.go b/pkg/kubenest/util/template.go deleted file mode 100644 index 62effa72b..000000000 --- a/pkg/kubenest/util/template.go +++ /dev/null @@ -1,32 +0,0 @@ -package util - -import ( - "bytes" - "fmt" - "text/template" -) - -// defaultValue returns value when it is a non-empty string; otherwise it -// returns defaultVal. It is registered below as the "defaultValue" template function. -func defaultValue(value interface{}, defaultVal string) string { - if str, ok := value.(string); ok && str != "" { - return str - } - return defaultVal -} - -// ParseTemplate validates and parses the template passed as argument -func ParseTemplate(strtmpl string, obj interface{}) (string, error) { - var buf bytes.Buffer - tmpl := template.New("template").Funcs(template.FuncMap{ - "defaultValue": defaultValue, - }) - tmpl, err := tmpl.Parse(strtmpl) - if err != nil { - return "", fmt.Errorf("error when parsing template, err: %w", err) - } - err = tmpl.Execute(&buf, obj) - if err != nil { - return "", fmt.Errorf("error when executing template, err: %w", err) - } - return buf.String(), nil -}
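ParseTemplate and its defaultValue helper are easiest to understand from a call. Here is a small self-contained sketch of the same pattern, with the helper reimplemented locally so it runs without the deleted package; the template text and field names are made up:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// defaultValue mirrors the deleted helper: use value when it is a non-empty
// string, otherwise fall back to defaultVal.
func defaultValue(value interface{}, defaultVal string) string {
	if str, ok := value.(string); ok && str != "" {
		return str
	}
	return defaultVal
}

func main() {
	tmpl := template.Must(template.New("demo").Funcs(template.FuncMap{
		"defaultValue": defaultValue,
	}).Parse(`image: {{ defaultValue .Repository "docker.io/library" }}/etcd:{{ .Tag }}`))

	var buf bytes.Buffer
	// .Repository is absent from the data, so defaultValue substitutes the fallback.
	if err := tmpl.Execute(&buf, map[string]interface{}{"Tag": "3.5.9"}); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // image: docker.io/library/etcd:3.5.9
}
```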
diff --git a/pkg/kubenest/util/template_test.go b/pkg/kubenest/util/template_test.go deleted file mode 100644 index c6412a772..000000000 --- a/pkg/kubenest/util/template_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package util - -import ( - "testing" -) - -func TestParseTemplate(t *testing.T) { - tests := []struct { - name string - strtmpl string - obj interface{} - want string - expectErr bool - }{ - { - name: "valid template with defaultValue", - strtmpl: `Hello, {{defaultValue .Name "World"}}!`, - obj: map[string]interface{}{"Name": "Alice"}, - want: "Hello, Alice!", - expectErr: false, - }, - { - name: "valid template with default value", - strtmpl: `Hello, {{defaultValue .Name "World"}}!`, - obj: map[string]interface{}{}, - want: "Hello, World!", - expectErr: false, - }, - { - name: "invalid template", - strtmpl: `Hello, {{.Name`, // Missing closing braces - obj: map[string]interface{}{"Name": "Alice"}, - want: "", - expectErr: true, - }, - //{ - // name: "template execution error", - // strtmpl: `Hello, {{.Name}}!`, - // obj: nil, // obj is nil, so this will fail during execution - // want: "", - // expectErr: true, - //}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ParseTemplate(tt.strtmpl, tt.obj) - if (err != nil) != tt.expectErr { - t.Errorf("ParseTemplate() error = %v, expectErr %v", err, tt.expectErr) - return - } - if got != tt.want { - t.Errorf("ParseTemplate() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/kubenest/util/util.go b/pkg/kubenest/util/util.go deleted file mode 100644 index 8117c1db3..000000000 --- a/pkg/kubenest/util/util.go +++ /dev/null @@ -1,282 +0,0 @@ -package util - -import ( - "crypto/rand" - "encoding/base64" - "fmt" - "math/big" - "net" - "strings" - - "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/utils" -) - -func FindGlobalNode(nodeName string, globalNodes []v1alpha1.GlobalNode) (*v1alpha1.GlobalNode, bool) { - for _, globalNode := range globalNodes { - if globalNode.Name == nodeName { - return &globalNode, true - } - } - return nil, false -} - -func GenerateKubeclient(virtualCluster *v1alpha1.VirtualCluster) (kubernetes.Interface, error) { - if len(virtualCluster.Spec.Kubeconfig) == 0 { - return nil, fmt.Errorf("virtualcluster %s kubeconfig is empty", virtualCluster.Name) - } - kubeconfigStream, err := base64.StdEncoding.DecodeString(virtualCluster.Spec.Kubeconfig) - if err != nil { - return nil, fmt.Errorf("virtualcluster %s decode target kubernetes kubeconfig %s err: %v", virtualCluster.Name, virtualCluster.Spec.Kubeconfig, err) - } - - config, err := utils.NewConfigFromBytes(kubeconfigStream) - if err != nil { - return nil, fmt.Errorf("generate kubernetes config failed: %s", err) - } - - k8sClient, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("generate K8s basic client failed: %v", err) - } - - return k8sClient, nil -} - -func GetFirstIP(ipNetStrs string) ([]net.IP, error) { - ipNetStrArray := strings.Split(ipNetStrs, ",") - if len(ipNetStrArray) > 2 { - return nil, fmt.Errorf("GetFirstIP failed, ip string is too long: %s", ipNetStrs) - } - - var ips []net.IP - for _, ipNetStr := range ipNetStrArray { - ip, ipNet, err := net.ParseCIDR(ipNetStr) - if err != nil { - return nil, fmt.Errorf("parse ipNetStr failed: %s", err)
- } - - networkIP := ip.Mask(ipNet.Mask) - - // IPv4 - if ip.To4() != nil { - firstIP := make(net.IP, len(networkIP)) - copy(firstIP, networkIP) - firstIP[len(firstIP)-1]++ - ips = append(ips, firstIP) - continue - } - - // IPv6 - firstIP := make(net.IP, len(networkIP)) - copy(firstIP, networkIP) - for i := len(firstIP) - 1; i >= 0; i-- { - firstIP[i]++ - if firstIP[i] != 0 { - break - } - } - ips = append(ips, firstIP) - } - return ips, nil -} - -func IPV6First(ipNetStr string) (bool, error) { - ipNetStrArray := strings.Split(ipNetStr, ",") - if len(ipNetStrArray) > 2 { - return false, fmt.Errorf("IPV6First failed, ip string is too long: %s", ipNetStr) - } - return utils.IsIPv6(ipNetStrArray[0]), nil -} - -// parseCIDR returns a channel that generates IP addresses in the CIDR range. -func parseCIDR(cidr string) (chan string, error) { - ip, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - return nil, err - } - ch := make(chan string) - go func() { - for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { - ch <- ip.String() - } - close(ch) - }() - return ch, nil -} - -// inc increments an IP address. -func inc(ip net.IP) { - for j := len(ip) - 1; j >= 0; j-- { - ip[j]++ - if ip[j] > 0 { - break - } - } -} - -// parseRange returns a channel that generates IP addresses in the range. -func parseRange(ipRange string) (chan string, error) { - parts := strings.Split(ipRange, "-") - if len(parts) != 2 { - return nil, fmt.Errorf("invalid IP range format: %s", ipRange) - } - startIP := net.ParseIP(parts[0]) - endIP := net.ParseIP(parts[1]) - if startIP == nil || endIP == nil { - return nil, fmt.Errorf("invalid IP address in range: %s", ipRange) - } - - ch := make(chan string) - go func() { - for ip := startIP; !ip.Equal(endIP); inc(ip) { - ch <- ip.String() - } - ch <- endIP.String() - close(ch) - }() - return ch, nil -} - -// parseVIPPool returns a channel that generates IP addresses from the vipPool. -func parseVIPPool(vipPool []string) (chan string, error) { - ch := make(chan string) - go func() { - defer close(ch) - for _, entry := range vipPool { - entry = strings.TrimSpace(entry) - var ipCh chan string - var err error - if strings.Contains(entry, "/") { - ipCh, err = parseCIDR(entry) - } else if strings.Contains(entry, "-") { - ipCh, err = parseRange(entry) - } else { - ip := net.ParseIP(entry) - if ip == nil { - err = fmt.Errorf("invalid IP address: %s", entry) - } else { - ipCh = make(chan string, 1) - ipCh <- entry - close(ipCh) - } - } - if err != nil { - fmt.Println("Error:", err) - return - } - for ip := range ipCh { - ch <- ip - } - } - }() - return ch, nil -}
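The three generators above stream candidate addresses through channels, and the address arithmetic lives entirely in inc. A tiny stand-alone demonstration of that walk, reproducing inc locally and using a made-up /30 prefix:

```go
package main

import (
	"fmt"
	"net"
)

// inc is the byte-wise address increment from the deleted util.go, copied
// here so the sketch runs on its own.
func inc(ip net.IP) {
	for j := len(ip) - 1; j >= 0; j-- {
		ip[j]++
		if ip[j] > 0 {
			break
		}
	}
}

func main() {
	// Walk a CIDR the way parseCIDR's goroutine does: start from the masked
	// network address and increment until the prefix no longer contains it.
	ip, ipnet, err := net.ParseCIDR("192.168.1.0/30")
	if err != nil {
		panic(err)
	}
	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		fmt.Println(ip) // 192.168.1.0, .1, .2, .3
	}
}
```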
-// FindAvailableIP finds an available IP address from vipPool that is not in allocatedVips. -func FindAvailableIP(vipPool, allocatedVips []string) (string, error) { - allocatedSet := make(map[string]struct{}) - for _, ip := range allocatedVips { - allocatedSet[ip] = struct{}{} - } - - ipCh, err := parseVIPPool(vipPool) - if err != nil { - return "", err - } - - for ip := range ipCh { - if _, allocated := allocatedSet[ip]; !allocated { - return ip, nil - } - } - - return "", fmt.Errorf("no available IP addresses") -} - -// SecureRandomInt returns a uniformly distributed random int in [0, n), using crypto/rand as the entropy source. -func SecureRandomInt(n int) (int, error) { - bigN := big.NewInt(int64(n)) - randInt, err := rand.Int(rand.Reader, bigN) - if err != nil { - return 0, err - } - return int(randInt.Int64()), nil -} - -func IsIPAvailable(ips, vipPool []string) (string, error) { - for _, ip := range ips { - if b, err := IsIPInRange(ip, vipPool); b && err == nil { - return ip, nil - } - } - return "", fmt.Errorf("specified IP not available in the VIP pool") -} - -// IsIPInRange checks if the given IP is in any of the provided IP ranges -func IsIPInRange(ipStr string, ranges []string) (bool, error) { - ip := net.ParseIP(ipStr) - if ip == nil { - return false, fmt.Errorf("invalid IP address: %s", ipStr) - } - - for _, r := range ranges { - if strings.Contains(r, "/") { - // Handle CIDR notation - _, ipNet, err := net.ParseCIDR(r) - if err != nil { - return false, fmt.Errorf("invalid CIDR notation: %s", r) - } - if ipNet.Contains(ip) { - return true, nil - } - } else if strings.Contains(r, "-") { - // Handle IP range notation - ips := strings.Split(r, "-") - if len(ips) != 2 { - return false, fmt.Errorf("invalid range notation: %s", r) - } - startIP := net.ParseIP(strings.TrimSpace(ips[0])) - endIP := net.ParseIP(strings.TrimSpace(ips[1])) - if startIP == nil || endIP == nil { - return false, fmt.Errorf("invalid IP range: %s", r) - } - if compareIPs(ip, startIP) >= 0 && compareIPs(ip, endIP) <= 0 { - return true, nil - } - } else { - return false, fmt.Errorf("invalid IP range or CIDR format: %s", r) - } - } - - return false, nil -} - -// compareIPs compares two IP addresses, returns -1 if ip1 < ip2, 1 if ip1 > ip2, and 0 if they are equal -func compareIPs(ip1, ip2 net.IP) int { - if ip1.To4() != nil && ip2.To4() != nil { - return compareBytes(ip1.To4(), ip2.To4()) - } - return compareBytes(ip1, ip2) -} - -// compareBytes compares two byte slices, returns -1 if a < b, 1 if a > b, and 0 if they are equal -func compareBytes(a, b []byte) int { - for i := 0; i < len(a) && i < len(b); i++ { - if a[i] < b[i] { - return -1 - } - if a[i] > b[i] { - return 1 - } - } - if len(a) < len(b) { - return -1 - } - if len(a) > len(b) { - return 1 - } - return 0 -} diff --git a/pkg/kubenest/util/util_test.go b/pkg/kubenest/util/util_test.go deleted file mode 100644 index 4ec053e73..000000000 --- a/pkg/kubenest/util/util_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package util - -import ( - "fmt" - "testing" - - "gopkg.in/yaml.v3" -) - -func TestFindAvailableIP(t *testing.T) { - type args struct { - vipPool []string - allocatedVips []string - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - { - name: "test1", - args: args{ - vipPool: []string{"192.168.0.1", "192.168.0.2", "192.168.0.3"}, - allocatedVips: []string{"192.168.0.1", "192.168.0.2"}, - }, - want: "192.168.0.3", - wantErr: false, - }, - { - name: "test2", - args: args{ - vipPool: []string{ - "192.168.0.1", - "192.168.0.2-192.168.0.10", - "192.168.1.0/24", - "2001:db8::1", - "2001:db8::1-2001:db8::10", - "2001:db8::/64", - }, - allocatedVips:
[]string{"192.168.0.1", "192.168.0.2"}, - }, - want: "192.168.0.3", - wantErr: false, - }, - { - name: "test3", - args: args{ - vipPool: []string{ - "192.168.6.110-192.168.6.120", - }, - allocatedVips: []string{}, - }, - want: "192.168.6.110", - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := FindAvailableIP(tt.args.vipPool, tt.args.allocatedVips) - fmt.Printf("got vip : %v", got) - if (err != nil) != tt.wantErr { - t.Errorf("FindAvailableIP() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("FindAvailableIP() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestFindAvailableIP2(_ *testing.T) { - type HostPortPool struct { - PortsPool []int32 `yaml:"portsPool"` - } - type VipPool struct { - Vip []string `yaml:"vipPool"` - } - var vipPool VipPool - var hostPortPool HostPortPool - yamlData2 := ` -portsPool: - - 33001 - - 33002 - - 33003 - - 33004 - - 33005 - - 33006 - - 33007 - - 33008 - - 33009 - - 33010 -` - yamlData := ` -vipPool: - - 192.168.6.110-192.168.6.120 -` - if err := yaml.Unmarshal([]byte(yamlData), &vipPool); err != nil { - panic(err) - } - if err := yaml.Unmarshal([]byte(yamlData2), &hostPortPool); err != nil { - panic(err) - } - fmt.Printf("vipPool: %v", vipPool) -} diff --git a/pkg/kubenest/workflow/phase.go b/pkg/kubenest/workflow/phase.go deleted file mode 100644 index e3f3e74a0..000000000 --- a/pkg/kubenest/workflow/phase.go +++ /dev/null @@ -1,99 +0,0 @@ -package workflow - -import "k8s.io/klog/v2" - -type Phase struct { - Tasks []Task - runData RunData - runDataInitializer func() (RunData, error) -} - -type Task struct { - Name string - Run func(RunData) error - Skip func(RunData) (bool, error) - Tasks []Task - RunSubTasks bool -} - -type RunData = interface{} - -func NewPhase() *Phase { - return &Phase{ - Tasks: []Task{}, - } -} - -func (p *Phase) AppendTask(t Task) { - p.Tasks = append(p.Tasks, t) -} - -func (p *Phase) initData() (RunData, error) { - if p.runData == nil && p.runDataInitializer != nil { - var err error - if p.runData, err = p.runDataInitializer(); err != nil { - klog.ErrorS(err, "failed to initialize running data") - return nil, err - } - } - - return p.runData, nil -} - -func (p *Phase) SetDataInitializer(build func() (RunData, error)) { - p.runDataInitializer = build -} - -func (p *Phase) Run() error { - runData := p.runData - if runData == nil { - if _, err := p.initData(); err != nil { - return err - } - } - - for _, t := range p.Tasks { - if err := run(t, p.runData); err != nil { - return err - } - } - - return nil -} - -func (p *Phase) Init() error { - runData := p.runData - if runData == nil { - if _, err := p.initData(); err != nil { - return err - } - } - return nil -} - -func run(t Task, data RunData) error { - if t.Skip != nil { - skip, err := t.Skip(data) - if err != nil { - return err - } - if skip { - return nil - } - } - - if t.Run != nil { - if err := t.Run(data); err != nil { - return err - } - if t.RunSubTasks { - for _, p := range t.Tasks { - if err := run(p, data); err != nil { - return err - } - } - } - } - - return nil -}