diff --git a/commands/cluster_command_launcher.go b/commands/cluster_command_launcher.go index 6fdc2eb..bca77d3 100644 --- a/commands/cluster_command_launcher.go +++ b/commands/cluster_command_launcher.go @@ -91,9 +91,8 @@ const ( stopNodeFlag = "stop-hosts" reIPFileFlag = "re-ip-file" removeNodeFlag = "remove" - // VER-90436: restart -> start - startNodeFlag = "restart" - startHostFlag = "start-hosts" + startNodeFlag = "start" + startHostFlag = "start-hosts" ) // Flag and key for database replication @@ -169,7 +168,7 @@ const ( startSCSubCmd = "start_subcluster" stopNodeCmd = "stop_node" removeNodeSubCmd = "remove_node" - restartNodeSubCmd = "restart_node" + startNodeSubCmd = "start_node" reIPSubCmd = "re_ip" sandboxSubCmd = "sandbox_subcluster" unsandboxSubCmd = "unsandbox_subcluster" @@ -527,7 +526,7 @@ func constructCmds() []*cobra.Command { makeCmdSandboxSubcluster(), makeCmdUnsandboxSubcluster(), // node-scope cmds - makeCmdRestartNodes(), + makeCmdStartNodes(), makeCmdAddNode(), makeCmdStopNode(), makeCmdRemoveNode(), diff --git a/commands/cmd_add_node.go b/commands/cmd_add_node.go index 5f97caa..c748d19 100644 --- a/commands/cmd_add_node.go +++ b/commands/cmd_add_node.go @@ -193,18 +193,19 @@ func (c *CmdAddNode) Run(vcc vclusterops.ClusterCommands) error { options := c.addNodeOptions - vdb, addNodeError := vcc.VAddNode(options) - if addNodeError != nil { - return addNodeError + vdb, err := vcc.VAddNode(options) + if err != nil { + vcc.LogError(err, "fail to add node") + return err } // write db info to vcluster config file - err := writeConfig(&vdb, true /*forceOverwrite*/) + err = writeConfig(&vdb, true /*forceOverwrite*/) if err != nil { - vcc.PrintWarning("fail to write config file, details: %s", err) + vcc.DisplayWarning("fail to write config file, details: %s", err) } - vcc.PrintInfo("Added nodes %v to database %s", c.addNodeOptions.NewHosts, options.DBName) + vcc.DisplayInfo("Successfully added nodes %v to database %s", c.addNodeOptions.NewHosts, options.DBName) return nil } diff --git a/commands/cmd_add_subcluster.go b/commands/cmd_add_subcluster.go index 5328bf2..1b4a77e 100644 --- a/commands/cmd_add_subcluster.go +++ b/commands/cmd_add_subcluster.go @@ -16,8 +16,6 @@ package commands import ( - "fmt" - "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/vertica/vcluster/vclusterops" @@ -203,7 +201,7 @@ func (c *CmdAddSubcluster) Run(vcc vclusterops.ClusterCommands) error { err := vcc.VAddSubcluster(options) if err != nil { - vcc.LogError(err, "failed to add subcluster") + vcc.LogError(err, "fail to add subcluster") return err } @@ -215,24 +213,23 @@ func (c *CmdAddSubcluster) Run(vcc vclusterops.ClusterCommands) error { vdb, err := vcc.VAddNode(&options.VAddNodeOptions) if err != nil { - const msg = "Failed to add nodes into the new subcluster" - vcc.LogError(err, msg) - fmt.Printf("%s\nHint: subcluster %q is successfully created, you should use add_node to add nodes\n", + const msg = "Fail to add nodes into the new subcluster" + vcc.DisplayError("%s\nHint: subcluster %q is successfully created, you should use add_node to add nodes\n", msg, options.VAddNodeOptions.SCName) return err } // update db info in the config file err = writeConfig(&vdb, true /*forceOverwrite*/) if err != nil { - vcc.PrintWarning("fail to write config file, details: %s", err) + vcc.DisplayWarning("fail to write config file, details: %s", err) } } if len(options.NewHosts) > 0 { - vcc.PrintInfo("Added subcluster %s with nodes %v to database %s", + vcc.DisplayInfo("Successfully added 
subcluster %s with nodes %v to database %s", options.SCName, options.NewHosts, options.DBName) } else { - vcc.PrintInfo("Added subcluster %s to database %s", options.SCName, options.DBName) + vcc.DisplayInfo("Successfully added subcluster %s to database %s", options.SCName, options.DBName) } return nil } diff --git a/commands/cmd_base.go b/commands/cmd_base.go index ea1d6f9..927d5d3 100644 --- a/commands/cmd_base.go +++ b/commands/cmd_base.go @@ -438,9 +438,9 @@ func (c *CmdBase) writeCmdOutputToFile(f *os.File, output []byte, logger vlog.Pr _, err := f.Write(output) if err != nil { if f == os.Stdout { - logger.PrintWarning("%s", err) + logger.DisplayWarning("%s", err) } else { - logger.PrintWarning("Could not write command output to file %s, details: %s", c.output, err) + logger.DisplayWarning("Could not write command output to file %s, details: %s", c.output, err) } } } diff --git a/commands/cmd_config_recover.go b/commands/cmd_config_recover.go index 74ea2bb..870e62a 100644 --- a/commands/cmd_config_recover.go +++ b/commands/cmd_config_recover.go @@ -115,7 +115,7 @@ func (c *CmdConfigRecover) validateParse(logger vlog.Printer) error { func (c *CmdConfigRecover) Run(vcc vclusterops.ClusterCommands) error { vdb, err := vcc.VFetchCoordinationDatabase(c.recoverConfigOptions) if err != nil { - vcc.LogError(err, "failed to recover the config file") + vcc.LogError(err, "fail to recover the config file") return err } // write db info to vcluster config file @@ -124,7 +124,7 @@ func (c *CmdConfigRecover) Run(vcc vclusterops.ClusterCommands) error { if err != nil { return fmt.Errorf("fail to write config file, details: %s", err) } - vcc.PrintInfo("Recovered config file for database %s at %s", vdb.Name, + vcc.DisplayInfo("Successfully recovered config file for database %s at %s", vdb.Name, c.recoverConfigOptions.ConfigPath) return nil diff --git a/commands/cmd_config_show.go b/commands/cmd_config_show.go index 45e34ca..27ff7d2 100644 --- a/commands/cmd_config_show.go +++ b/commands/cmd_config_show.go @@ -65,12 +65,13 @@ func (c *CmdConfigShow) Parse(inputArgv []string, logger vlog.Printer) error { return nil } -func (c *CmdConfigShow) Run(_ vclusterops.ClusterCommands) error { +func (c *CmdConfigShow) Run(vcc vclusterops.ClusterCommands) error { fileBytes, err := os.ReadFile(dbOptions.ConfigPath) if err != nil { return fmt.Errorf("fail to read config file, details: %w", err) } fmt.Printf("%s", string(fileBytes)) + vcc.DisplayInfo("Successfully read the config file %s", dbOptions.ConfigPath) return nil } diff --git a/commands/cmd_create_connection.go b/commands/cmd_create_connection.go index cb8f116..067aa08 100644 --- a/commands/cmd_create_connection.go +++ b/commands/cmd_create_connection.go @@ -112,7 +112,7 @@ func (c *CmdCreateConnection) Run(vcc vclusterops.ClusterCommands) error { if err != nil { return fmt.Errorf("fail to write connection file, details: %s", err) } - fmt.Printf("Successfully write connection file in %s", globals.connFile) + vcc.DisplayInfo("Successfully wrote the connection file in %s", globals.connFile) return nil } diff --git a/commands/cmd_create_db.go b/commands/cmd_create_db.go index 6bf69bf..c65330b 100644 --- a/commands/cmd_create_db.go +++ b/commands/cmd_create_db.go @@ -16,8 +16,6 @@ package commands import ( - "fmt" - "github.com/spf13/cobra" "github.com/vertica/vcluster/vclusterops" "github.com/vertica/vcluster/vclusterops/util" @@ -211,7 +209,7 @@ func (c *CmdCreateDB) setLocalFlags(cmd *cobra.Command) { &c.createDBOptions.TimeoutNodeStartupSeconds, 
"startup-timeout", util.DefaultTimeoutSeconds, - "The timeout to wait for the nodes to start", + "The timeout in seconds to wait for the nodes to start", ) } @@ -275,20 +273,22 @@ func (c *CmdCreateDB) Run(vcc vclusterops.ClusterCommands) error { vcc.V(1).Info("Called method Run()") vdb, createError := vcc.VCreateDatabase(c.createDBOptions) if createError != nil { + vcc.LogError(createError, "fail to create database") return createError } + vcc.DisplayInfo("Successfully created a database with name [%s]", vdb.Name) + // write db info to vcluster config file err := writeConfig(&vdb, c.createDBOptions.ForceOverwriteFile) if err != nil { - fmt.Printf("Warning: Fail to write config file, details: %s\n", err) + vcc.DisplayWarning("Fail to write config file, details: %s\n", err) } // write config parameters to vcluster config param file err = c.writeConfigParam(c.createDBOptions.ConfigurationParameters, c.createDBOptions.ForceOverwriteFile) if err != nil { - vcc.PrintWarning("fail to write config param file, details: %s", err) + vcc.DisplayWarning("fail to write config param file, details: %s", err) } - vcc.PrintInfo("Created a database with name [%s]", vdb.Name) return nil } diff --git a/commands/cmd_drop_db.go b/commands/cmd_drop_db.go index 7c21b53..ecb9b14 100644 --- a/commands/cmd_drop_db.go +++ b/commands/cmd_drop_db.go @@ -66,25 +66,12 @@ Examples: []string{dbNameFlag, configFlag, hostsFlag, ipv6Flag, catalogPathFlag, dataPathFlag, depotPathFlag}, ) - // local flags - newCmd.setLocalFlags(cmd) - // hide flags since we expect it to come from config file, not from user input hideLocalFlags(cmd, []string{hostsFlag, catalogPathFlag, dataPathFlag, depotPathFlag}) return cmd } -// setLocalFlags will set the local flags the command has -func (c *CmdDropDB) setLocalFlags(cmd *cobra.Command) { - cmd.Flags().BoolVar( - &c.dropDBOptions.ForceDelete, - "force-delete", - false, - "Delete local directories like catalog, depot, and data.", - ) -} - func (c *CmdDropDB) Parse(inputArgv []string, logger vlog.Printer) error { c.argv = inputArgv logger.LogArgParse(&c.argv) @@ -106,17 +93,17 @@ func (c *CmdDropDB) Run(vcc vclusterops.ClusterCommands) error { err := vcc.VDropDatabase(c.dropDBOptions) if err != nil { - vcc.LogError(err, "failed do drop the database") + vcc.LogError(err, "fail do drop the database") return err } - vcc.PrintInfo("Successfully dropped database %s", c.dropDBOptions.DBName) + vcc.DisplayInfo("Successfully dropped database %s", c.dropDBOptions.DBName) // if the database is successfully dropped, the config file will be removed // if failed to remove it, we will ask users to manually do it err = removeConfig() if err != nil { - vcc.PrintWarning("Fail to remove config file %q, "+ - "please manually do it. 
Details: %v", c.dropDBOptions.ConfigPath, err) + vcc.DisplayWarning("Fail to remove config file %q, "+ + "please manually do it, details: %v", c.dropDBOptions.ConfigPath, err) } return nil } diff --git a/commands/cmd_install_packages.go b/commands/cmd_install_packages.go index 8d24947..caccf24 100644 --- a/commands/cmd_install_packages.go +++ b/commands/cmd_install_packages.go @@ -117,7 +117,7 @@ func (c *CmdInstallPackages) Run(vcc vclusterops.ClusterCommands) error { status, err := vcc.VInstallPackages(options) if err != nil { - vcc.LogError(err, "failed to install the packages") + vcc.LogError(err, "fail to install the packages") return err } @@ -129,7 +129,7 @@ func (c *CmdInstallPackages) Run(vcc vclusterops.ClusterCommands) error { c.writeCmdOutputToFile(globals.file, bytes, vcc.GetLog()) vcc.LogInfo("Installed the packages: ", "packages", string(bytes)) - + vcc.DisplayInfo("Successfully installed packages") return nil } diff --git a/commands/cmd_list_all_nodes.go b/commands/cmd_list_all_nodes.go index e8494f8..014b114 100644 --- a/commands/cmd_list_all_nodes.go +++ b/commands/cmd_list_all_nodes.go @@ -105,7 +105,7 @@ func (c *CmdListAllNodes) Run(vcc vclusterops.ClusterCommands) error { // if all nodes are down, the nodeStates list is not empty // for this case, we don't want to show errors but show DOWN for the nodes if len(nodeStates) == 0 { - vcc.PrintError("fail to list all nodes: %s", err) + vcc.LogError(err, "fail to list all nodes") return err } } @@ -117,6 +117,7 @@ func (c *CmdListAllNodes) Run(vcc vclusterops.ClusterCommands) error { c.writeCmdOutputToFile(globals.file, bytes, vcc.GetLog()) vcc.LogInfo("Node states: ", "nodeStates", string(bytes)) + vcc.DisplayInfo("Successfully listed all nodes") return nil } diff --git a/commands/cmd_re_ip.go b/commands/cmd_re_ip.go index 385e4ce..0beddad 100644 --- a/commands/cmd_re_ip.go +++ b/commands/cmd_re_ip.go @@ -16,8 +16,6 @@ package commands import ( - "fmt" - "github.com/spf13/cobra" "github.com/vertica/vcluster/vclusterops" "github.com/vertica/vcluster/vclusterops/vlog" @@ -46,7 +44,7 @@ func makeCmdReIP() *cobra.Command { `This command changes the IP addresses of database nodes in the catalog. The database must be down to change the IP addresses with re_ip. If -the database is up, you must run restart_node after re_ip for the +the database is up, you must run start_node after re_ip for the IP changes to take effect. 
The file specified by the re-ip-file option must be a JSON file in the @@ -138,14 +136,14 @@ func (c *CmdReIP) Run(vcc vclusterops.ClusterCommands) error { return err } - vcc.PrintInfo("Re-ip is successfully completed") + vcc.DisplayInfo("Successfully changed the IP addresses of database nodes") // update config file after running re_ip if canUpdateConfig { c.UpdateConfig(dbConfig) err = dbConfig.write(options.ConfigPath, true /*forceOverwrite*/) if err != nil { - fmt.Printf("Warning: fail to update config file, details %v\n", err) + vcc.DisplayWarning("fail to update config file, details: %v", err) } } diff --git a/commands/cmd_remove_node.go b/commands/cmd_remove_node.go index 2abd473..c70d43e 100644 --- a/commands/cmd_remove_node.go +++ b/commands/cmd_remove_node.go @@ -81,12 +81,6 @@ func (c *CmdRemoveNode) setLocalFlags(cmd *cobra.Command) { []string{}, "Comma-separated list of host(s) to remove from the database", ) - cmd.Flags().BoolVar( - &c.removeNodeOptions.ForceDelete, - "force-delete", - true, - "Whether to force clean-up of existing directories if they are not empty", - ) } func (c *CmdRemoveNode) Parse(inputArgv []string, logger vlog.Printer) error { @@ -140,15 +134,16 @@ func (c *CmdRemoveNode) Run(vcc vclusterops.ClusterCommands) error { vdb, err := vcc.VRemoveNode(options) if err != nil { + vcc.LogError(err, "fail to remove node") return err } // write db info to vcluster config file err = writeConfig(&vdb, true /*forceOverwrite*/) if err != nil { - vcc.PrintWarning("fail to write config file, details: %s", err) + vcc.DisplayWarning("fail to write config file, details: %s", err) } - vcc.PrintInfo("Successfully removed nodes %v from database %s", c.removeNodeOptions.HostsToRemove, options.DBName) + vcc.DisplayInfo("Successfully removed nodes %v from database %s", c.removeNodeOptions.HostsToRemove, options.DBName) return nil } diff --git a/commands/cmd_remove_subcluster.go b/commands/cmd_remove_subcluster.go index 68b4c75..42a6889 100644 --- a/commands/cmd_remove_subcluster.go +++ b/commands/cmd_remove_subcluster.go @@ -82,12 +82,6 @@ func (c *CmdRemoveSubcluster) setLocalFlags(cmd *cobra.Command) { "", "Name of subcluster to be removed", ) - cmd.Flags().BoolVar( - &c.removeScOptions.ForceDelete, - "force-delete", - true, - "Whether force delete directories if they are not empty", - ) } func (c *CmdRemoveSubcluster) Parse(inputArgv []string, logger vlog.Printer) error { @@ -130,16 +124,18 @@ func (c *CmdRemoveSubcluster) Run(vcc vclusterops.ClusterCommands) error { vdb, err := vcc.VRemoveSubcluster(options) if err != nil { + vcc.LogError(err, "fail to remove subcluster") return err } + vcc.DisplayInfo("Successfully removed subcluster %s from database %s", + options.SCName, options.DBName) + // write db info to vcluster config file err = writeConfig(&vdb, true /*forceOverwrite*/) if err != nil { - vcc.PrintWarning("fail to write config file, details: %s", err) + vcc.DisplayWarning("fail to write config file, details: %s", err) } - vcc.PrintInfo("Successfully removed subcluster %s from database %s", - options.SCName, options.DBName) return nil } diff --git a/commands/cmd_restart_node.go b/commands/cmd_restart_node.go index ca44185..3925cad 100644 --- a/commands/cmd_restart_node.go +++ b/commands/cmd_restart_node.go @@ -24,13 +24,13 @@ import ( "github.com/vertica/vcluster/vclusterops/vlog" ) -/* CmdRestartNodes +/* CmdStartNodes * * Implements ClusterCommand interface */ -type CmdRestartNodes struct { +type CmdStartNodes struct { CmdBase - restartNodesOptions 
*vclusterops.VStartNodesOptions + startNodesOptions *vclusterops.VStartNodesOptions // comma-separated list of vnode=host vnodeHostMap map[string]string @@ -39,40 +39,40 @@ type CmdStartNodes struct { rawStartHostList []string } -func makeCmdRestartNodes() *cobra.Command { - // CmdRestartNodes - newCmd := &CmdRestartNodes{} +func makeCmdStartNodes() *cobra.Command { + // CmdStartNodes + newCmd := &CmdStartNodes{} opt := vclusterops.VStartNodesOptionsFactory() - newCmd.restartNodesOptions = &opt + newCmd.startNodesOptions = &opt cmd := makeBasicCobraCmd( newCmd, - restartNodeSubCmd, - "Restart nodes in the database", + startNodeSubCmd, + "Start nodes in the database", `This command starts individual nodes in a running cluster. This differs from start_db, which starts Vertica after cluster quorum is lost. -You can pass --restart a comma-separated list of NODE_NAME=IP_TO_RESTART pairs -to restart multiple nodes without a config file. If the IP_TO_RESTART value +You can pass --start a comma-separated list of NODE_NAME=IP_TO_START pairs +to start multiple nodes without a config file. If the IP_TO_START value does not match the information stored in the catalog for NODE_NAME, Vertica -updates the catalog with the IP_TO_RESTART value and restarts the node. +updates the catalog with the IP_TO_START value and starts the node. Examples: - # Restart a single node in the database with config file - vcluster restart_node --db-name test_db \ - --restart v_test_db_node0004=10.20.30.43 --password testpassword \ + # Start a single node in the database with config file + vcluster start_node --db-name test_db \ + --start v_test_db_node0004=10.20.30.43 --password testpassword \ --config /opt/vertica/config/vertica_cluster.yaml - # Restart a single node and change its IP address in the database + # Start a single node and change its IP address in the database # with config file (assuming the node IP address previously stored # in the catalog was not 10.20.30.44) - vcluster restart_node --db-name test_db \ - --restart v_test_db_node0004=10.20.30.44 --password testpassword \ + vcluster start_node --db-name test_db \ + --start v_test_db_node0004=10.20.30.44 --password testpassword \ --config /opt/vertica/config/vertica_cluster.yaml - # Restart multiple nodes in the database with config file - vcluster restart_node --db-name test_db \ - --restart v_test_db_node0003=10.20.30.42,v_test_db_node0004=10.20.30.43 \ + # Start multiple nodes in the database with config file + vcluster start_node --db-name test_db \ + --start v_test_db_node0003=10.20.30.42,v_test_db_node0004=10.20.30.43 \ --password testpassword --config /opt/vertica/config/vertica_cluster.yaml `, []string{dbNameFlag, hostsFlag, ipv6Flag, configFlag, passwordFlag}, @@ -81,19 +81,19 @@ Examples: // local flags newCmd.setLocalFlags(cmd) - // require nodes or hosts to restart + // require nodes or hosts to start markFlagsOneRequired(cmd, []string{startNodeFlag, startHostFlag}) return cmd } // setLocalFlags will set the local flags the command has -func (c *CmdRestartNodes) setLocalFlags(cmd *cobra.Command) { +func (c *CmdStartNodes) setLocalFlags(cmd *cobra.Command) { cmd.Flags().StringToStringVar( &c.vnodeHostMap, startNodeFlag, map[string]string{}, - "Comma-separated list of pairs part of the database nodes that need to be restarted", + "Comma-separated list of NODE_NAME=IP pairs for the database nodes that need to be started", ) cmd.Flags().StringSliceVar( &c.rawStartHostList, @@ -102,85 +102,84 @@ 
"Comma-separated list of hosts that need to be started", ) cmd.Flags().IntVar( - &c.restartNodesOptions.StatePollingTimeout, + &c.startNodesOptions.StatePollingTimeout, "timeout", util.DefaultTimeoutSeconds, "The timeout (in seconds) to wait for polling node state operation", ) - // VER-90436: restart -> start - // users only input --restart or --start-hosts + // users only input --start or --start-hosts cmd.MarkFlagsMutuallyExclusive([]string{startNodeFlag, startHostFlag}...) } -func (c *CmdRestartNodes) Parse(inputArgv []string, logger vlog.Printer) error { +func (c *CmdStartNodes) Parse(inputArgv []string, logger vlog.Printer) error { c.argv = inputArgv logger.LogArgParse(&c.argv) // for some options, we do not want to use their default values, // if they are not provided in cli, // reset the value of those options to nil - c.ResetUserInputOptions(&c.restartNodesOptions.DatabaseOptions) + c.ResetUserInputOptions(&c.startNodesOptions.DatabaseOptions) return c.validateParse(logger) } -func (c *CmdRestartNodes) validateParse(logger vlog.Printer) error { +func (c *CmdStartNodes) validateParse(logger vlog.Printer) error { logger.Info("Called validateParse()") - // VER-90436: restart -> start // the node-host map can be loaded from the value of - // either --restart or --start-hosts + // either --start or --start-hosts if len(c.rawStartHostList) > 0 { - err := c.buildRestartNodeHostMap() + err := c.buildStartNodeHostMap() if err != nil { return err } } else { - err := c.restartNodesOptions.ParseNodesList(c.vnodeHostMap) + err := c.startNodesOptions.ParseNodesList(c.vnodeHostMap) if err != nil { return err } } - err := c.getCertFilesFromCertPaths(&c.restartNodesOptions.DatabaseOptions) + err := c.getCertFilesFromCertPaths(&c.startNodesOptions.DatabaseOptions) if err != nil { return err } - err = c.ValidateParseBaseOptions(&c.restartNodesOptions.DatabaseOptions) + err = c.ValidateParseBaseOptions(&c.startNodesOptions.DatabaseOptions) if err != nil { return err } - return c.setDBPassword(&c.restartNodesOptions.DatabaseOptions) + return c.setDBPassword(&c.startNodesOptions.DatabaseOptions) } -func (c *CmdRestartNodes) Run(vcc vclusterops.ClusterCommands) error { +func (c *CmdStartNodes) Run(vcc vclusterops.ClusterCommands) error { vcc.V(1).Info("Called method Run()") - options := c.restartNodesOptions + options := c.startNodesOptions // this is the instruction that will be used by both CLI and operator err := vcc.VStartNodes(options) if err != nil { + vcc.LogError(err, "fail to start node") return err } - var hostToRestart []string + var hostToStart []string for _, ip := range options.Nodes { - hostToRestart = append(hostToRestart, ip) + hostToStart = append(hostToStart, ip) } - vcc.PrintInfo("Successfully restart hosts %s of the database %s", hostToRestart, options.DBName) + vcc.DisplayInfo("Successfully started hosts %s of the database %s", hostToStart, options.DBName) return nil } -// SetDatabaseOptions will assign a vclusterops.DatabaseOptions instance to the one in CmdRestartNodes -func (c *CmdRestartNodes) SetDatabaseOptions(opt *vclusterops.DatabaseOptions) { - c.restartNodesOptions.DatabaseOptions = *opt +// SetDatabaseOptions will assign a vclusterops.DatabaseOptions instance to the one in CmdStartNodes +func (c *CmdStartNodes) SetDatabaseOptions(opt *vclusterops.DatabaseOptions) { + c.startNodesOptions.DatabaseOptions = *opt } -func (c *CmdRestartNodes) buildRestartNodeHostMap() error { +func (c *CmdStartNodes) buildStartNodeHostMap() error { dbConfig, err := readConfig() if err != nil 
{ return fmt.Errorf("--start-hosts can only be used when "+ @@ -193,7 +192,7 @@ func (c *CmdRestartNodes) buildRestartNodeHostMap() error { } for _, rawHost := range c.rawStartHostList { - ip, err := util.ResolveToOneIP(rawHost, c.restartNodesOptions.IPv6) + ip, err := util.ResolveToOneIP(rawHost, c.startNodesOptions.IPv6) if err != nil { return err } @@ -202,7 +201,7 @@ func (c *CmdRestartNodes) buildRestartNodeHostMap() error { return fmt.Errorf("cannot find the address %s (of host %s) from the config file", ip, rawHost) } - c.restartNodesOptions.Nodes[nodeName] = ip + c.startNodesOptions.Nodes[nodeName] = ip } return nil diff --git a/commands/cmd_revive_db.go b/commands/cmd_revive_db.go index 4e21489..ea6c372 100644 --- a/commands/cmd_revive_db.go +++ b/commands/cmd_revive_db.go @@ -188,21 +188,20 @@ func (c *CmdReviveDB) Run(vcc vclusterops.ClusterCommands) error { return nil } + vcc.DisplayInfo("Successfully revived database %s", c.reviveDBOptions.DBName) + // write db info to vcluster config file vdb.FirstStartAfterRevive = true err = writeConfig(vdb, true /*forceOverwrite*/) if err != nil { - vcc.PrintWarning("fail to write config file, details: %s", err) + vcc.DisplayWarning("fail to write config file, details: %s", err) } // write config parameters to vcluster config param file err = c.writeConfigParam(c.reviveDBOptions.ConfigurationParameters, true /*forceOverwrite*/) if err != nil { - vcc.PrintWarning("fail to write config param file, details: %s", err) + vcc.DisplayWarning("fail to write config param file, details: %s", err) } - - vcc.PrintInfo("Successfully revived database %s", c.reviveDBOptions.DBName) - return nil } diff --git a/commands/cmd_sandbox.go b/commands/cmd_sandbox.go index 76426cf..3d589ec 100644 --- a/commands/cmd_sandbox.go +++ b/commands/cmd_sandbox.go @@ -143,34 +143,34 @@ func (c *CmdSandboxSubcluster) Analyze(logger vlog.Printer) error { } func (c *CmdSandboxSubcluster) Run(vcc vclusterops.ClusterCommands) error { - vcc.PrintInfo("Running sandbox subcluster") vcc.LogInfo("Calling method Run() for command " + sandboxSubCmd) options := c.sbOptions err := vcc.VSandbox(&options) if err != nil { + vcc.LogError(err, "fail to sandbox subcluster") return err } - defer vcc.PrintInfo("Successfully sandboxed subcluster " + c.sbOptions.SCName + " as " + c.sbOptions.SandboxName) + defer vcc.DisplayInfo("Successfully sandboxed subcluster " + c.sbOptions.SCName + " as " + c.sbOptions.SandboxName) // Read and then update the sandbox information on config file dbConfig, configErr := readConfig() if configErr != nil { - vcc.PrintWarning("fail to read config file, skipping config file update", "error", configErr) + vcc.DisplayWarning("fail to read config file, skipping config file update, details: %v", configErr) return nil } // Update config updatedConfig := c.updateSandboxInfo(dbConfig) if !updatedConfig { - vcc.PrintWarning("did not update node info for sandboxed sc " + c.sbOptions.SCName + + vcc.DisplayWarning("did not update node info for sandboxed sc " + c.sbOptions.SCName + ", info about the subcluster nodes is missing in config file, skipping config update") return nil } writeErr := dbConfig.write(options.ConfigPath, true /*forceOverwrite*/) if writeErr != nil { - vcc.PrintWarning("fail to write the config file, details: " + writeErr.Error()) + vcc.DisplayWarning("fail to write the config file, details: " + writeErr.Error()) return nil } return nil diff --git a/commands/cmd_scrutinize.go b/commands/cmd_scrutinize.go index f50bd57..dcbe760 100644 --- 
a/commands/cmd_scrutinize.go +++ b/commands/cmd_scrutinize.go @@ -194,7 +194,6 @@ func (c *CmdScrutinize) setLocalFlags(cmd *cobra.Command) { } func (c *CmdScrutinize) Parse(inputArgv []string, logger vlog.Printer) error { - logger.PrintInfo("Parsing scrutinize command input") c.argv = inputArgv logger.LogMaskedArgParse(c.argv) // for some options, we do not want to use their default values, @@ -233,7 +232,6 @@ func (c *CmdScrutinize) validateParse(logger vlog.Printer) error { } func (c *CmdScrutinize) Run(vcc vclusterops.ClusterCommands) error { - vcc.PrintInfo("Running scrutinize") // TODO remove when no longer needed for tests vcc.LogInfo("Calling method Run()") // Read the password from a secret @@ -259,7 +257,7 @@ func (c *CmdScrutinize) Run(vcc vclusterops.ClusterCommands) error { vcc.LogError(err, "scrutinize run failed") return err } - vcc.PrintInfo("Successfully completed scrutinize run for the database %s", c.sOptions.DBName) + vcc.DisplayInfo("Successfully completed scrutinize run for the database %s", c.sOptions.DBName) return err } @@ -481,7 +479,7 @@ func (c *CmdScrutinize) validateTarballName(logger vlog.Printer) { if re.MatchString(c.sOptions.TarballName) { return } - logger.PrintWarning("The tarball name does not match GRASP regex VerticaScrutinize.yyyymmddhhmmss") + logger.DisplayWarning("The tarball name does not match GRASP regex VerticaScrutinize.yyyymmddhhmmss") } // readNonEmptyFile is a helper that reads the contents of a file into a string. diff --git a/commands/cmd_show_restore_points.go b/commands/cmd_show_restore_points.go index aab138c..8364a10 100644 --- a/commands/cmd_show_restore_points.go +++ b/commands/cmd_show_restore_points.go @@ -180,7 +180,7 @@ func (c *CmdShowRestorePoints) Run(vcc vclusterops.ClusterCommands) error { } c.writeCmdOutputToFile(globals.file, bytes, vcc.GetLog()) - vcc.PrintInfo("Successfully show restore points %v in database %s", restorePoints, options.DBName) + vcc.DisplayInfo("Successfully showed restore points %v in database %s", restorePoints, options.DBName) return nil } diff --git a/commands/cmd_start_db.go b/commands/cmd_start_db.go index 54780ea..166f4aa 100644 --- a/commands/cmd_start_db.go +++ b/commands/cmd_start_db.go @@ -52,7 +52,7 @@ func makeCmdStartDB() *cobra.Command { `This command starts a database on a set of hosts. Starts Vertica on each host and establishes cluster quorum. This command is -similar to restart_node, except start_db assumes that cluster quorum +similar to start_node, except start_db assumes that cluster quorum has been lost. 
The IP address provided for each node name must match the current IP address @@ -224,7 +224,7 @@ func (c *CmdStartDB) Run(vcc vclusterops.ClusterCommands) error { } options.FirstStartAfterRevive = dbConfig.FirstStartAfterRevive } else { - vcc.PrintWarning("fail to read config file", "error", readConfigErr) + vcc.DisplayWarning("fail to read config file, details: %v", readConfigErr) if options.MainCluster || options.Sandbox != util.MainClusterSandbox { return fmt.Errorf("cannot start the database partially without config file") } @@ -232,21 +232,22 @@ func (c *CmdStartDB) Run(vcc vclusterops.ClusterCommands) error { vdb, err := vcc.VStartDatabase(options) if err != nil { - vcc.LogError(err, "failed to start the database") + vcc.LogError(err, "fail to start the database") return err } + msg := fmt.Sprintf("Successfully started database %s", options.DBName) if options.Sandbox != "" { sandboxMsg := fmt.Sprintf(" on sandbox %s", options.Sandbox) - vcc.PrintInfo(msg + sandboxMsg) + vcc.DisplayInfo(msg + sandboxMsg) return nil } if options.MainCluster { startMsg := " on the main cluster" - vcc.PrintInfo(msg + startMsg) + vcc.DisplayInfo(msg + startMsg) return nil } - vcc.PrintInfo(msg) + vcc.DisplayInfo(msg) // for Eon database, update config file to fill nodes' subcluster information if readConfigErr == nil && options.IsEon { @@ -254,7 +255,7 @@ func (c *CmdStartDB) Run(vcc vclusterops.ClusterCommands) error { vdb.FirstStartAfterRevive = false err = writeConfig(vdb, true /*forceOverwrite*/) if err != nil { - vcc.PrintWarning("fail to update config file, details: %s", err) + vcc.DisplayWarning("fail to update config file, details: %s", err) } } diff --git a/commands/cmd_start_replication.go b/commands/cmd_start_replication.go index 05e6e4d..481dd98 100644 --- a/commands/cmd_start_replication.go +++ b/commands/cmd_start_replication.go @@ -239,7 +239,7 @@ func (c *CmdStartReplication) Run(vcc vclusterops.ClusterCommands) error { vcc.LogError(err, "fail to replicate to database", "targetDB", options.TargetDB) return err } - vcc.PrintInfo("Successfully replicate to database %s", options.TargetDB) + vcc.DisplayInfo("Successfully replicated to database %s", options.TargetDB) return nil } diff --git a/commands/cmd_start_subcluster.go b/commands/cmd_start_subcluster.go index bd64823..a812a93 100644 --- a/commands/cmd_start_subcluster.go +++ b/commands/cmd_start_subcluster.go @@ -127,10 +127,11 @@ func (c *CmdStartSubcluster) Run(vcc vclusterops.ClusterCommands) error { err := vcc.VStartSubcluster(options) if err != nil { + vcc.LogError(err, "fail to start subcluster") return err } - vcc.PrintInfo("Successfully started subcluster %s for database %s", + vcc.DisplayInfo("Successfully started subcluster %s for database %s", options.SCName, options.DBName) return nil diff --git a/commands/cmd_stop_db.go b/commands/cmd_stop_db.go index 926a85a..4579822 100644 --- a/commands/cmd_stop_db.go +++ b/commands/cmd_stop_db.go @@ -148,21 +148,21 @@ func (c *CmdStopDB) Run(vcc vclusterops.ClusterCommands) error { err := vcc.VStopDatabase(options) if err != nil { - vcc.LogError(err, "failed to stop the database") + vcc.LogError(err, "fail to stop the database") return err } - msg := fmt.Sprintf("Stopped a database with name %s", options.DBName) + msg := fmt.Sprintf("Successfully stopped a database with name %s", options.DBName) if options.SandboxName != "" { sandboxMsg := fmt.Sprintf(" on sandbox %s", options.SandboxName) - vcc.PrintInfo(msg + sandboxMsg) + vcc.DisplayInfo(msg + sandboxMsg) return nil } if options.MainCluster { stopMsg 
:= " on main cluster" - vcc.PrintInfo(msg + stopMsg) + vcc.DisplayInfo(msg + stopMsg) return nil } - vcc.PrintInfo(msg) + vcc.DisplayInfo(msg) return nil } diff --git a/commands/cmd_stop_node.go b/commands/cmd_stop_node.go index c7d6884..12ab084 100644 --- a/commands/cmd_stop_node.go +++ b/commands/cmd_stop_node.go @@ -104,10 +104,10 @@ func (c *CmdStopNode) Run(vcc vclusterops.ClusterCommands) error { err := vcc.VStopNode(options) if err != nil { - vcc.LogError(err, "failed to stop the nodes", "Nodes", c.stopNodeOptions.StopHosts) + vcc.LogError(err, "fail to stop the nodes", "Nodes", c.stopNodeOptions.StopHosts) return err } - vcc.PrintInfo("Successfully stopped the nodes %v", c.stopNodeOptions.StopHosts) + vcc.DisplayInfo("Successfully stopped the nodes %v", c.stopNodeOptions.StopHosts) return nil } diff --git a/commands/cmd_stop_subcluster.go b/commands/cmd_stop_subcluster.go index c2ffa33..000e266 100644 --- a/commands/cmd_stop_subcluster.go +++ b/commands/cmd_stop_subcluster.go @@ -149,10 +149,10 @@ func (c *CmdStopSubcluster) Run(vcc vclusterops.ClusterCommands) error { err := vcc.VStopSubcluster(options) if err != nil { - vcc.LogError(err, "failed to stop the subcluster", "Subcluster", options.SCName) + vcc.LogError(err, "fail to stop the subcluster", "Subcluster", options.SCName) return err } - vcc.PrintInfo("Successfully stopped subcluster %s", options.SCName) + vcc.DisplayInfo("Successfully stopped subcluster %s", options.SCName) return nil } diff --git a/commands/cmd_unsandbox.go b/commands/cmd_unsandbox.go index 3e8507e..703640e 100644 --- a/commands/cmd_unsandbox.go +++ b/commands/cmd_unsandbox.go @@ -125,27 +125,27 @@ func (c *CmdUnsandboxSubcluster) Analyze(logger vlog.Printer) error { } func (c *CmdUnsandboxSubcluster) Run(vcc vclusterops.ClusterCommands) error { - vcc.PrintInfo("Running unsandbox subcluster") vcc.LogInfo("Calling method Run() for command " + unsandboxSubCmd) options := c.usOptions err := vcc.VUnsandbox(&options) if err != nil { + vcc.LogError(err, "fail to unsandbox subcluster") return err } - defer vcc.PrintInfo("Successfully unsandboxed subcluster " + c.usOptions.SCName) + defer vcc.DisplayInfo("Successfully unsandboxed subcluster " + c.usOptions.SCName) // Read and then update the sandbox information on config file dbConfig, configErr := c.resetSandboxInfo() if configErr != nil { - vcc.PrintWarning("fail to update config file : ", "error", configErr) + vcc.DisplayWarning("fail to update config file : ", "error", configErr) return nil } writeErr := dbConfig.write(options.ConfigPath, true /*forceOverwrite*/) if writeErr != nil { - vcc.PrintWarning("fail to write the config file, details: " + writeErr.Error()) + vcc.DisplayWarning("fail to write the config file, details: " + writeErr.Error()) return nil } return nil diff --git a/commands/user_input_test.go b/commands/user_input_test.go index 86313a3..dca531e 100644 --- a/commands/user_input_test.go +++ b/commands/user_input_test.go @@ -125,21 +125,20 @@ func TestCreateConnection(t *testing.T) { assert.Equal(t, hosts, dbConn.TargetHosts[0]) } -// VER-90436: restart -> start func TestStartNode(t *testing.T) { - // either --restart or --start-hosts must be specified - err := simulateVClusterCli("vcluster restart_node") - assert.ErrorContains(t, err, "at least one of the flags in the group [restart start-hosts] is required") + // either --start or --start-hosts must be specified + err := simulateVClusterCli("vcluster start_node") + assert.ErrorContains(t, err, "at least one of the flags in the group [start 
start-hosts] is required") - // --restart should be followed with the key1=value1,key2=value2 format - err = simulateVClusterCli("vcluster restart_node --restart host1") - assert.ErrorContains(t, err, `"--restart" flag: host1 must be formatted as key=value`) + // --start should be followed with the key1=value1,key2=value2 format + err = simulateVClusterCli("vcluster start_node --start host1") + assert.ErrorContains(t, err, `"--start" flag: host1 must be formatted as key=value`) // --start-hosts should be used with the config file - err = simulateVClusterCli("vcluster restart_node --start-hosts host1") + err = simulateVClusterCli("vcluster start_node --start-hosts host1") assert.ErrorContains(t, err, "--start-hosts can only be used when the config file is available") - // --restart or --start-hosts cannot be both specified - err = simulateVClusterCli("vcluster restart_node --restart node1=host1 --start-hosts host1") - assert.ErrorContains(t, err, "[restart start-hosts] were all set") + // --start or --start-hosts cannot be both specified + err = simulateVClusterCli("vcluster start_node --start node1=host1 --start-hosts host1") + assert.ErrorContains(t, err, "[start start-hosts] were all set") } diff --git a/vclusterops/add_subcluster.go b/vclusterops/add_subcluster.go index c6b2404..b750be6 100644 --- a/vclusterops/add_subcluster.go +++ b/vclusterops/add_subcluster.go @@ -33,8 +33,6 @@ type VAddSubclusterOptions struct { DatabaseOptions // part 2: subcluster info SCName string - SCHosts []string - SCRawHosts []string IsPrimary bool ControlSetSize int CloneSC string @@ -47,8 +45,6 @@ type VAddSubclusterInfo struct { Hosts []string UserName string Password *string - SCName string - SCHosts []string IsPrimary bool ControlSetSize int CloneSC string @@ -108,26 +104,6 @@ func (options *VAddSubclusterOptions) validateExtraOptions(logger vlog.Printer) logger.PrintWarning("option CloneSC is not implemented yet so it will be ignored") } - // verify the hosts of new subcluster does not exist in current database - if len(options.SCHosts) > 0 { - hostSet := make(map[string]struct{}) - for _, host := range options.SCHosts { - hostSet[host] = struct{}{} - } - dupHosts := []string{} - for _, host := range options.Hosts { - if _, exist := hostSet[host]; exist { - dupHosts = append(dupHosts, host) - } - } - if len(dupHosts) > 0 { - return fmt.Errorf("new subcluster has hosts %v which already exist in database %s", dupHosts, options.DBName) - } - - // TODO remove this log after we supported adding subcluster with nodes - logger.PrintWarning("options SCRawHosts and SCHosts are not implemented yet so they will be ignored") - } - return nil } @@ -161,14 +137,6 @@ func (options *VAddSubclusterOptions) analyzeOptions() (err error) { } } - // resolve SCRawHosts to be IP addresses - if len(options.SCRawHosts) > 0 { - options.SCHosts, err = util.ResolveRawHostsToAddresses(options.SCRawHosts, options.IPv6) - if err != nil { - return err - } - } - return nil } diff --git a/vclusterops/cluster_op.go b/vclusterops/cluster_op.go index 6eeb593..d71a51d 100644 --- a/vclusterops/cluster_op.go +++ b/vclusterops/cluster_op.go @@ -496,6 +496,9 @@ type ClusterCommands interface { PrintInfo(msg string, v ...any) PrintWarning(msg string, v ...any) PrintError(msg string, v ...any) + DisplayInfo(msg string, v ...any) + DisplayWarning(msg string, v ...any) + DisplayError(msg string, v ...any) VAddNode(options *VAddNodeOptions) (VCoordinationDatabase, error) VStopNode(options *VStopNodeOptions) error @@ -557,6 +560,18 @@ func (vcc 
VClusterCommandsLogger) PrintError(msg string, v ...any) { vcc.Log.PrintError(msg, v...) } +func (vcc VClusterCommandsLogger) DisplayInfo(msg string, v ...any) { + vcc.Log.DisplayInfo(msg, v...) +} + +func (vcc VClusterCommandsLogger) DisplayWarning(msg string, v ...any) { + vcc.Log.DisplayWarning(msg, v...) +} + +func (vcc VClusterCommandsLogger) DisplayError(msg string, v ...any) { + vcc.Log.DisplayError(msg, v...) +} + // VClusterCommands passes state around for all top-level administrator commands // (e.g. create db, add node, etc.). type VClusterCommands struct { diff --git a/vclusterops/coordinator_database.go b/vclusterops/coordinator_database.go index ad2dd12..7dd4aa9 100644 --- a/vclusterops/coordinator_database.go +++ b/vclusterops/coordinator_database.go @@ -122,6 +122,7 @@ func (vdb *VCoordinationDatabase) setFromCreateDBOptions(options *VCreateDatabas vdb.HostList = make([]string, len(options.Hosts)) vdb.HostList = options.Hosts vdb.LicensePathOnNode = options.LicensePathOnNode + vdb.Ipv6 = options.IPv6 if options.GetAwsCredentialsFromEnv { err := vdb.getAwsCredentialsFromEnv() diff --git a/vclusterops/drop_db.go b/vclusterops/drop_db.go index adcaa49..b1ff04d 100644 --- a/vclusterops/drop_db.go +++ b/vclusterops/drop_db.go @@ -31,6 +31,7 @@ func VDropDatabaseOptionsFactory() VDropDatabaseOptions { options := VDropDatabaseOptions{} // set default values to the params options.setDefaultValues() + options.ForceDelete = true return options } diff --git a/vclusterops/fetch_node_state.go b/vclusterops/fetch_node_state.go index 06cc65c..c25c79d 100644 --- a/vclusterops/fetch_node_state.go +++ b/vclusterops/fetch_node_state.go @@ -1,8 +1,10 @@ package vclusterops import ( + "errors" "fmt" + "github.com/vertica/vcluster/rfc7807" "github.com/vertica/vcluster/vclusterops/util" ) @@ -71,6 +73,15 @@ func (vcc VClusterCommands) VFetchNodeState(options *VFetchNodeStateOptions) ([] err = vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, util.MainClusterSandbox) if err != nil { vcc.Log.PrintInfo("Error from vdb build: %s", err.Error()) + + rfcError := &rfc7807.VProblem{} + ok := errors.As(err, &rfcError) + if ok { + if rfcError.ProblemID == rfc7807.AuthenticationError { + return nil, err + } + } + return vcc.fetchNodeStateFromDownDB(options) } diff --git a/vclusterops/https_check_db_running_op.go b/vclusterops/https_check_db_running_op.go index e238bb9..4dea376 100644 --- a/vclusterops/https_check_db_running_op.go +++ b/vclusterops/https_check_db_running_op.go @@ -169,7 +169,7 @@ func (op *httpsCheckRunningDBOp) generateHintMessage(host, dbName string) (msg s case DropDB: msg = fmt.Sprintf("%s, please stop the HTTPS service before dropping the existing database.", generalMsg) case ReIP: - msg = fmt.Sprintf("%s, please consider using restart_node to re-ip nodes for the running database.", generalMsg) + msg = fmt.Sprintf("%s, please consider using start_node to re-ip nodes for the running database.", generalMsg) case StopDB, StartDB, ReviveDB, StopSC: msg = fmt.Sprintf("%s.", generalMsg) } diff --git a/vclusterops/https_get_nodes_info_op.go b/vclusterops/https_get_nodes_info_op.go index 6a6f4ca..23d8aa9 100644 --- a/vclusterops/https_get_nodes_info_op.go +++ b/vclusterops/https_get_nodes_info_op.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + "github.com/vertica/vcluster/rfc7807" "github.com/vertica/vcluster/vclusterops/util" ) @@ -113,8 +114,9 @@ func (op *httpsGetNodesInfoOp) processResult(_ *opEngineExecContext) error { op.logResponse(host, result) if 
result.isUnauthorizedRequest() { - return fmt.Errorf("[%s] wrong password/certificate for https service on host %s", + detail := fmt.Sprintf("[%s] wrong password/certificate for https service on host %s", op.name, host) + return rfc7807.New(rfc7807.AuthenticationError).WithHost(host).WithDetail(detail) } if result.isPassing() { diff --git a/vclusterops/https_poll_node_state_op.go b/vclusterops/https_poll_node_state_op.go index d360f49..1eeb267 100644 --- a/vclusterops/https_poll_node_state_op.go +++ b/vclusterops/https_poll_node_state_op.go @@ -40,7 +40,7 @@ func (cmd CmdType) String() string { case StartDBCmd: return "start_db" case StartNodeCmd: - return "restart_node" + return "start_node" case CreateDBCmd: return "create_db" } diff --git a/vclusterops/https_re_ip_op.go b/vclusterops/https_re_ip_op.go index 42fb880..e257f2b 100644 --- a/vclusterops/https_re_ip_op.go +++ b/vclusterops/https_re_ip_op.go @@ -78,14 +78,6 @@ func makeHTTPSReIPOpWithHosts(hosts, nodeNamesToReIP, hostToReIP []string, } func (op *httpsReIPOp) setupClusterHTTPRequest(hostsToReIP []string) error { - // At this point there must be more up nodes than hosts to re-ip. - // Failure to meet that requirement would most likely mean that we have lost - // quorum and a cluster restart is needed - if len(op.hosts) < len(hostsToReIP) && op.forStartNodeCommand { - return &ReIPNoClusterQuorumError{ - Detail: fmt.Sprintf("[%s] %d up nodes are not enough for re-ip", op.name, len(op.hosts)), - } - } for i, host := range hostsToReIP { httpRequest := hostHTTPRequest{} httpRequest.Method = PutMethod diff --git a/vclusterops/nma_download_config.go b/vclusterops/nma_download_config.go index 0b4a2d1..e929989 100644 --- a/vclusterops/nma_download_config.go +++ b/vclusterops/nma_download_config.go @@ -110,9 +110,9 @@ func (op *nmaDownloadConfigOp) prepare(execContext *opEngineExecContext) error { // If vdb contains nodes' info, we will check if there are any primary up nodes. // If we found any primary up nodes, we set catalogPathMap based on their info in vdb. } else { - // This case is used for restarting nodes operation. + // This case is used for starting nodes operation. // Otherwise, we set catalogPathMap from the catalog editor (start_db, create_db). - // For restartNodes, If the sourceConfigHost input is a nil value, we find any UP primary nodes as source host to update the host input. + // For startNodes, if the sourceConfigHost input is a nil value, we find any UP primary node as the source host to update the host input. // We update the catalogPathMap for the next download operation's steps from node information by using HTTPS /v1/nodes var primaryUpHosts []string for host, vnode := range op.vdb.HostNodeMap { diff --git a/vclusterops/nma_upload_config.go b/vclusterops/nma_upload_config.go index 42a7ba1..b5c918c 100644 --- a/vclusterops/nma_upload_config.go +++ b/vclusterops/nma_upload_config.go @@ -107,7 +107,7 @@ func (op *nmaUploadConfigOp) setupClusterHTTPRequest(hosts []string) error { func (op *nmaUploadConfigOp) prepare(execContext *opEngineExecContext) error { op.catalogPathMap = make(map[string]string) // If any node's info is available, we set catalogPathMap from node's info. - // This case is used for restarting nodes operation. + // This case is used for starting nodes operation. // Otherwise, we set catalogPathMap from the catalog editor (start_db, create_db). 
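// The typed auth-error handling added above (fetch_node_state.go and
// https_get_nodes_info_op.go) follows Go's standard errors.As pattern. A
// minimal sketch of the two sides, using only the rfc7807 names that appear
// in this patch (New, AuthenticationError, WithHost, WithDetail, VProblem,
// ProblemID); any behavior of the rfc7807 package beyond these calls is an
// assumption:
//
//	// producer: wrap the credential failure as a typed problem instead of a plain fmt.Errorf
//	err := rfc7807.New(rfc7807.AuthenticationError).WithHost(host).WithDetail(detail)
//
//	// consumer: recover the typed problem from anywhere in the wrapped chain and
//	// branch on it, so a wrong password is reported instead of retrying against a down DB
//	rfcError := &rfc7807.VProblem{}
//	if errors.As(err, &rfcError) && rfcError.ProblemID == rfc7807.AuthenticationError {
//		return nil, err
//	}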
if op.vdb == nil || len(op.vdb.HostNodeMap) == 0 { nmaVDB := execContext.nmaVDatabase diff --git a/vclusterops/remove_subcluster.go b/vclusterops/remove_subcluster.go index 8fc1886..b4cf83d 100644 --- a/vclusterops/remove_subcluster.go +++ b/vclusterops/remove_subcluster.go @@ -44,6 +44,8 @@ func VRemoveScOptionsFactory() VRemoveScOptions { // set default values to the params options.setDefaultValues() + options.ForceDelete = true + return options } diff --git a/vclusterops/replication.go b/vclusterops/replication.go index 9d04a85..4f65308 100644 --- a/vclusterops/replication.go +++ b/vclusterops/replication.go @@ -163,9 +163,9 @@ func (vcc VClusterCommands) VReplicateDatabase(options *VReplicationDatabaseOpti return err } - // retrieve information from the database to accurately determine the state of each node in both the main cluster and andbox + // retrieve information from the database to accurately determine the state of each node in both the main cluster and a given sandbox vdb := makeVCoordinationDatabase() - err = vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, AnySandbox) + err = vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, options.SandboxName) if err != nil { return err } diff --git a/vclusterops/start_node.go b/vclusterops/start_node.go index 998f721..1cbe0a7 100644 --- a/vclusterops/start_node.go +++ b/vclusterops/start_node.go @@ -73,7 +73,7 @@ func (options *VStartNodesOptions) setDefaultValues() { } func (options *VStartNodesOptions) validateRequiredOptions(logger vlog.Printer) error { - err := options.validateBaseOptions(commandRestartNode, logger) + err := options.validateBaseOptions(commandStartNode, logger) if err != nil { return err } @@ -124,7 +124,7 @@ func (options *VStartNodesOptions) validateAnalyzeOptions(logger vlog.Printer) e } func (vcc VClusterCommands) startNodePreCheck(vdb *VCoordinationDatabase, options *VStartNodesOptions, - hostNodeNameMap map[string]string, restartNodeInfo *VStartNodesInfo) error { + hostNodeNameMap map[string]string, startNodeInfo *VStartNodesInfo) error { // sandboxes and the main cluster are not aware of each other's status // so check to make sure nodes to start are either // 1. 
all in the same sandbox, or @@ -144,7 +144,7 @@ func (vcc VClusterCommands) startNodePreCheck(vdb *VCoordinationDatabase, option return fmt.Errorf(`cannot start nodes in different sandboxes, the sandbox-node map of the nodes to start is: %v`, sandboxNodeMap) } for k := range sandboxNodeMap { - restartNodeInfo.Sandbox = k + startNodeInfo.Sandbox = k } return nil } @@ -180,22 +180,22 @@ func (vcc VClusterCommands) VStartNodes(options *VStartNodesOptions) error { } hostNodeNameMap := make(map[string]string) - restartNodeInfo := new(VStartNodesInfo) + startNodeInfo := new(VStartNodesInfo) for _, vnode := range vdb.HostNodeMap { hostNodeNameMap[vnode.Name] = vnode.Address } // precheck to make sure the nodes to start are either all sandboxed nodes in one sandbox or all main cluster nodes - err = vcc.startNodePreCheck(&vdb, options, hostNodeNameMap, restartNodeInfo) + err = vcc.startNodePreCheck(&vdb, options, hostNodeNameMap, startNodeInfo) if err != nil { return err } - // sandboxes may have different catalog from the main cluster, update the vdb build from the sandbox of the nodes to restart - err = vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, restartNodeInfo.Sandbox) + // sandboxes may have different catalog from the main cluster, update the vdb build from the sandbox of the nodes to start + err = vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, startNodeInfo.Sandbox) if err != nil { - if restartNodeInfo.Sandbox != util.MainClusterSandbox { - return errors.Join(err, fmt.Errorf("hint: make sure there is at least one UP node in the sandbox %s", restartNodeInfo.Sandbox)) + if startNodeInfo.Sandbox != util.MainClusterSandbox { + return errors.Join(err, fmt.Errorf("hint: make sure there is at least one UP node in the sandbox %s", startNodeInfo.Sandbox)) } return errors.Join(err, fmt.Errorf("hint: make sure there is at least one UP node in the database")) } @@ -203,10 +203,10 @@ func (vcc VClusterCommands) VStartNodes(options *VStartNodesOptions) error { // find out hosts // - that need to re-ip, and // - that don't need to re-ip - hostsNoNeedToReIP := options.separateHostsBasedOnReIPNeed(hostNodeNameMap, restartNodeInfo, &vdb, vcc.Log) + hostsNoNeedToReIP := options.separateHostsBasedOnReIPNeed(hostNodeNameMap, startNodeInfo, &vdb, vcc.Log) // check primary node count is more than nodes to re-ip, especially for sandboxes - err = options.checkQuorum(&vdb, restartNodeInfo) + err = options.checkQuorum(&vdb, startNodeInfo) if err != nil { return err } @@ -214,28 +214,28 @@ func (vcc VClusterCommands) VStartNodes(options *VStartNodesOptions) error { // for the hosts that don't need to re-ip, // if none of them is down and no other nodes to re-ip, // we will early stop as there is no need to start them - if !restartNodeInfo.hasDownNodeNoNeedToReIP && len(restartNodeInfo.ReIPList) == 0 { + if !startNodeInfo.hasDownNodeNoNeedToReIP && len(startNodeInfo.ReIPList) == 0 { const msg = "The provided nodes are either not in catalog or already up. There is nothing to start." fmt.Println(msg) vcc.Log.Info(msg) return nil } - // we can proceed to restart both nodes with and without IP changes - restartNodeInfo.HostsToStart = append(restartNodeInfo.HostsToStart, restartNodeInfo.ReIPList...) - restartNodeInfo.HostsToStart = append(restartNodeInfo.HostsToStart, hostsNoNeedToReIP...) + // we can proceed to start both nodes with and without IP changes + startNodeInfo.HostsToStart = append(startNodeInfo.HostsToStart, startNodeInfo.ReIPList...) 
+ startNodeInfo.HostsToStart = append(startNodeInfo.HostsToStart, hostsNoNeedToReIP...) // If no nodes are found to start, we can simply exit here. This can happen if // given a list of nodes that aren't in the catalog any longer. - if len(restartNodeInfo.HostsToStart) == 0 { + if len(startNodeInfo.HostsToStart) == 0 { const msg = "None of the nodes provided are in the catalog. There is nothing to start." fmt.Println(msg) vcc.Log.Info(msg) return nil } - // produce restart_node instructions - instructions, err := vcc.produceStartNodesInstructions(restartNodeInfo, options, &vdb) + // produce start_node instructions + instructions, err := vcc.produceStartNodesInstructions(startNodeInfo, options, &vdb) if err != nil { return fmt.Errorf("fail to produce instructions, %w", err) } @@ -247,7 +247,7 @@ func (vcc VClusterCommands) VStartNodes(options *VStartNodesOptions) error { // Give the instructions to the VClusterOpEngine to run err = clusterOpEngine.run(vcc.Log) if err != nil { - return fmt.Errorf("fail to restart node, %w", err) + return fmt.Errorf("fail to start node, %w", err) } return nil } @@ -256,25 +256,35 @@ func (vcc VClusterCommands) VStartNodes(options *VStartNodesOptions) error { // even when a sandbox node is reip'ed func (options *VStartNodesOptions) checkQuorum(vdb *VCoordinationDatabase, restartNodeInfo *VStartNodesInfo) error { sandboxPrimaryUpNodes := []string{} + var lenOfPrimaryReIPList int + reIPMap := make(map[string]bool, len(restartNodeInfo.ReIPList)) + for _, name := range restartNodeInfo.NodeNamesToStart { + reIPMap[name] = true + } for _, vnode := range vdb.HostNodeMap { - if vnode.IsPrimary && vnode.State == util.NodeUpState && vnode.Sandbox == restartNodeInfo.Sandbox { - sandboxPrimaryUpNodes = append(sandboxPrimaryUpNodes, vnode.Address) + if vnode.IsPrimary { + if vnode.State == util.NodeUpState && vnode.Sandbox == restartNodeInfo.Sandbox { + sandboxPrimaryUpNodes = append(sandboxPrimaryUpNodes, vnode.Address) + } + if reIPMap[vnode.Name] { + lenOfPrimaryReIPList++ + } } } - if len(sandboxPrimaryUpNodes) <= len(restartNodeInfo.ReIPList) { + if len(sandboxPrimaryUpNodes) <= lenOfPrimaryReIPList { return &ReIPNoClusterQuorumError{ - Detail: fmt.Sprintf("Quorum check failed: %d up node(s) is/are not enough to re-ip %d node(s)", - len(sandboxPrimaryUpNodes), len(restartNodeInfo.ReIPList)), + Detail: fmt.Sprintf("Quorum check failed: %d up node(s) is/are not enough to re-ip %d primary node(s)", + len(sandboxPrimaryUpNodes), lenOfPrimaryReIPList), } } return nil } // produceStartNodesInstructions will build a list of instructions to execute for -// the restart_node command. +// the start_node command. // // The generated instructions will later perform the following operations necessary -// for a successful restart_node: +// for a successful start_node: // - Check NMA connectivity // - Get UP nodes through HTTPS call, if any node is UP then the DB is UP and ready for starting nodes // - If need to do re-ip: @@ -284,9 +294,9 @@ func (options *VStartNodesOptions) checkQuorum(vdb *VCoordinationDatabase, resta // 4. 
Call https /v1/nodes to update nodes' info // - Check Vertica versions // - Use any UP primary nodes as source host for syncing spread.conf and vertica.conf -// - Sync the confs to the nodes to be restarted -// - Call https /v1/startup/command to get restart command of the nodes to be restarted -// - restart nodes +// - Sync the confs to the nodes to be started +// - Call https /v1/startup/command to get start command of the nodes to be started +// - start nodes // - Poll node start up // - sync catalog func (vcc VClusterCommands) produceStartNodesInstructions(startNodeInfo *VStartNodesInfo, options *VStartNodesOptions, @@ -310,8 +320,8 @@ func (vcc VClusterCommands) produceStartNodesInstructions(startNodeInfo *VStartN &httpsGetUpNodesOp, ) - // If we identify any nodes that need re-IP, HostsToRestart will contain the nodes that need re-IP. - // Otherwise, HostsToRestart will consist of all hosts with IPs recorded in the catalog, which are provided by user input. + // If we identify any nodes that need re-IP, HostsToStart will contain the nodes that need re-IP. + // Otherwise, HostsToStart will consist of all hosts with IPs recorded in the catalog, which are provided by user input. if len(startNodeInfo.ReIPList) != 0 { nmaNetworkProfileOp := makeNMANetworkProfileOp(startNodeInfo.ReIPList) httpsReIPOp, e := makeHTTPSReIPOp(startNodeInfo.NodeNamesToStart, startNodeInfo.ReIPList, @@ -359,7 +369,7 @@ func (vcc VClusterCommands) produceStartNodesInstructions(startNodeInfo *VStartN return instructions, err } - nmaRestartNewNodesOp := makeNMAStartNodeOpWithVDB(startNodeInfo.HostsToStart, options.StartUpConf, vdb) + nmaStartNewNodesOp := makeNMAStartNodeOpWithVDB(startNodeInfo.HostsToStart, options.StartUpConf, vdb) httpsPollNodeStateOp, err := makeHTTPSPollNodeStateOpWithTimeoutAndCommand(startNodeInfo.HostsToStart, options.usePassword, options.UserName, options.Password, options.StatePollingTimeout, StartNodeCmd) if err != nil { @@ -368,7 +378,7 @@ func (vcc VClusterCommands) produceStartNodesInstructions(startNodeInfo *VStartN instructions = append(instructions, &httpsRestartUpCommandOp, - &nmaRestartNewNodesOp, + &nmaStartNewNodesOp, &httpsPollNodeStateOp, ) @@ -386,7 +396,7 @@ func (vcc VClusterCommands) produceStartNodesInstructions(startNodeInfo *VStartN func (options *VStartNodesOptions) separateHostsBasedOnReIPNeed( hostNodeNameMap map[string]string, - restartNodeInfo *VStartNodesInfo, + startNodeInfo *VStartNodesInfo, vdb *VCoordinationDatabase, logger vlog.Printer) (hostsNoNeedToReIP []string) { for nodename, newIP := range options.Nodes { @@ -401,16 +411,16 @@ func (options *VStartNodesOptions) separateHostsBasedOnReIPNeed( } // if the IP that is given is different than the IP in the catalog, a re-ip is necessary if oldIP != newIP { - restartNodeInfo.ReIPList = append(restartNodeInfo.ReIPList, newIP) - restartNodeInfo.NodeNamesToStart = append(restartNodeInfo.NodeNamesToStart, nodename) - logger.Info("the nodes need to be re-IP", "nodeNames", restartNodeInfo.NodeNamesToStart, "IPs", restartNodeInfo.ReIPList) + startNodeInfo.ReIPList = append(startNodeInfo.ReIPList, newIP) + startNodeInfo.NodeNamesToStart = append(startNodeInfo.NodeNamesToStart, nodename) + logger.Info("the nodes need to be re-IP", "nodeNames", startNodeInfo.NodeNamesToStart, "IPs", startNodeInfo.ReIPList) } else { // otherwise, we don't need to re-ip hostsNoNeedToReIP = append(hostsNoNeedToReIP, newIP) vnode, ok := vdb.HostNodeMap[newIP] if ok && vnode.State == util.NodeDownState { - 
restartNodeInfo.hasDownNodeNoNeedToReIP = true + startNodeInfo.hasDownNodeNoNeedToReIP = true } } } diff --git a/vclusterops/unsandbox.go b/vclusterops/unsandbox.go index 7fd8e35..23cc446 100644 --- a/vclusterops/unsandbox.go +++ b/vclusterops/unsandbox.go @@ -271,7 +271,7 @@ func (vcc *VClusterCommands) produceUnsandboxSCInstructions(options *VUnsandboxO } // Start the nodes - nmaRestartNodesOp := makeNMAStartNodeOpAfterUnsandbox("") + nmaStartNodesOp := makeNMAStartNodeOpAfterUnsandbox("") // Poll for nodes UP httpsPollScUp, err := makeHTTPSPollSubclusterNodeStateUpOp(scHosts, options.SCName, @@ -283,7 +283,7 @@ func (vcc *VClusterCommands) produceUnsandboxSCInstructions(options *VUnsandboxO instructions = append(instructions, &nmaVersionCheck, &httpsStartUpCommandOp, - &nmaRestartNodesOp, + &nmaStartNodesOp, &httpsPollScUp, ) } diff --git a/vclusterops/vcluster_database_options.go b/vclusterops/vcluster_database_options.go index ef6feb5..bdcbfd8 100644 --- a/vclusterops/vcluster_database_options.go +++ b/vclusterops/vcluster_database_options.go @@ -92,7 +92,7 @@ const ( commandAddNode = "add_node" commandRemoveNode = "remove_node" commandStopNode = "stop_node" - commandRestartNode = "restart_node" + commandStartNode = "start_node" commandAddSubcluster = "add_subcluster" commandRemoveSubcluster = "remove_subcluster" commandStopSubcluster = "stop_subcluster" diff --git a/vclusterops/vlog/printer.go b/vclusterops/vlog/printer.go index 79ea477..c69efe2 100644 --- a/vclusterops/vlog/printer.go +++ b/vclusterops/vlog/printer.go @@ -19,6 +19,8 @@ import ( "fmt" "os" "strings" + "unicode" + "unicode/utf8" "github.com/fatih/color" "github.com/go-logr/logr" @@ -101,6 +103,36 @@ func (p *Printer) PrintWarning(msg string, v ...any) { p.printlnCond(WarningLog, fmsg) } +// DisplayInfo upper-cases the first letter of the given message, writes the +// message to the log, and prints it to the console. +func (p *Printer) DisplayInfo(msg string, v ...any) { + fmsg := fmt.Sprintf(msg, v...) + fmsg = firstLetterToUpper(fmsg) + escapedFmsg := escapeSpecialCharacters(fmsg) + p.Log.Info(escapedFmsg) + p.println(InfoLog, fmsg) +} + +// DisplayError lower-cases the first letter of the given error message, +// writes the message to the log, and prints it to the console. +func (p *Printer) DisplayError(msg string, v ...any) { + fmsg := fmt.Sprintf(msg, v...) + fmsg = firstLetterToLower(fmsg) + escapedFmsg := escapeSpecialCharacters(fmsg) + p.Log.Error(nil, escapedFmsg) + p.println(ErrorLog, fmsg) +} + +// DisplayWarning upper-cases the first letter of the given warning message, +// writes the message to the log, and prints it to the console. +func (p *Printer) DisplayWarning(msg string, v ...any) { + fmsg := fmt.Sprintf(msg, v...) + fmsg = firstLetterToUpper(fmsg) + escapedFmsg := escapeSpecialCharacters(fmsg) + p.Log.Info(escapedFmsg) + p.println(WarningLog, fmsg) +} + // escapeSpecialCharacters will escape special characters (tabs or newlines) in the message. // Messages that are typically meant for the console could have tabs and newlines for alignment. // We want to escape those when writing the message to the log so that each log entry is exactly one line long. 
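The Display helpers above, together with the ClusterCommands plumbing added in cluster_op.go, give every command the same reporting shape. A hypothetical sketch of the Run() convention this patch applies across the commands package — CmdExample, VDoSomething, and exampleOptions are stand-in names, while LogError, DisplayWarning, and DisplayInfo are the real methods:

func (c *CmdExample) Run(vcc vclusterops.ClusterCommands) error {
	vdb, err := vcc.VDoSomething(c.exampleOptions) // stand-in for VAddNode, VRemoveNode, etc.
	if err != nil {
		// hard failure: log it and return the error to the caller
		vcc.LogError(err, "fail to do something")
		return err
	}
	// non-fatal bookkeeping failure: warn on the console but still succeed
	if err = writeConfig(&vdb, true /*forceOverwrite*/); err != nil {
		vcc.DisplayWarning("fail to write config file, details: %s", err)
	}
	// exactly one success line, printed to the console and recorded in the log
	vcc.DisplayInfo("Successfully did something for database %s", c.exampleOptions.DBName)
	return nil
}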
@@ -110,6 +142,22 @@ func escapeSpecialCharacters(message string) string { return message } +func firstLetterToUpper(message string) string { + if message == "" { + return message + } + r, size := utf8.DecodeRuneInString(message) + return string(unicode.ToUpper(r)) + message[size:] +} + +func firstLetterToLower(message string) string { + if message == "" { + return message + } + r, size := utf8.DecodeRuneInString(message) + return string(unicode.ToLower(r)) + message[size:] +} + // printlnCond will conditionally print a message to the console if logging to a file func (p *Printer) printlnCond(label, msg string) { // Message is only printed if we are logging to a file only. Otherwise, it @@ -119,6 +167,11 @@ func (p *Printer) printlnCond(label, msg string) { } } +// println will print a message to the console +func (p *Printer) println(label, msg string) { + fmt.Printf("%s%s\n", label, msg) +} + // log functions for specific cases. func (p *Printer) LogArgParse(inputArgv *[]string) { fmsg := fmt.Sprintf("Called method Parse with args: %q.", *inputArgv)
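For reference, the casing helpers added in the printer.go hunks above can be exercised on their own. A standalone sketch — the helper bodies are copied verbatim from the patch, and main is for illustration only:

package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

// firstLetterToUpper upper-cases the first rune of a message, as DisplayInfo
// and DisplayWarning do before logging and printing.
func firstLetterToUpper(message string) string {
	if message == "" {
		return message
	}
	r, size := utf8.DecodeRuneInString(message)
	return string(unicode.ToUpper(r)) + message[size:]
}

// firstLetterToLower lower-cases the first rune, as DisplayError does.
func firstLetterToLower(message string) string {
	if message == "" {
		return message
	}
	r, size := utf8.DecodeRuneInString(message)
	return string(unicode.ToLower(r)) + message[size:]
}

func main() {
	// DisplayWarning("fail to write config file, ...") surfaces as "Fail to ..."
	fmt.Println(firstLetterToUpper("fail to write config file"))
	// DisplayError("Fail to add subcluster") surfaces as "fail to add subcluster"
	fmt.Println(firstLetterToLower("Fail to add subcluster"))
}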